/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;
#define RHINE_MSG_DEFAULT \
        (0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
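
/*
 * Illustrative sketch only (not the driver's code path): 8390-style 64-bin
 * multicast hash filters are typically indexed by the top six bits of the
 * Ethernet CRC of the destination address, with the resulting bit set in a
 * pair of 32-bit filter registers.  The real logic lives in
 * rhine_set_rx_mode() further down; this is just the general idea:
 *
 *	u32 hash_lo = 0, hash_hi = 0;
 *	int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;	// 0..63
 *	if (bit < 32)
 *		hash_lo |= 1 << bit;
 *	else
 *		hash_hi |= 1 << (bit - 32);
 *	// hash_lo/hash_hi would then go to MulticastFilter0/1
 */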


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
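
/*
 * Example of the power-of-two property mentioned above (a sketch, not code
 * used by the driver): with free-running unsigned indices, the number of
 * in-flight Tx descriptors and the current ring slot both reduce to cheap
 * masking, e.g. for TX_RING_SIZE == 64 the compiler turns '% 64' into '& 63':
 *
 *	unsigned int in_flight = cur_tx - dirty_tx;		// wraps safely
 *	unsigned int entry     = cur_tx % TX_RING_SIZE;		// == cur_tx & 63
 */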

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
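
/*
 * Usage example (an illustration, assuming the driver is built as the
 * "via-rhine" module): the values above are plain module parameters, e.g.
 *
 *	modprobe via-rhine rx_copybreak=1518 avoid_D3=1
 *
 * For a built-in driver the same thing is spelled on the kernel command
 * line, e.g. via-rhine.avoid_D3=1 (see the bootparam(7) note above).
 */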

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
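
/*
 * The copy-only-tiny-frames idea above, as a minimal sketch (this is an
 * illustration, not the driver's rhine_rx() code; variable names here are
 * made up for the example).  Frames shorter than rx_copybreak are copied
 * into a freshly allocated small skb so the full-sized ring buffer can be
 * reused in place; larger frames hand the ring skb itself up the stack and
 * a replacement buffer is allocated later:
 *
 *	if (pkt_len < rx_copybreak) {
 *		struct sk_buff *copy = netdev_alloc_skb_ip_align(dev, pkt_len);
 *		if (copy) {
 *			skb_copy_to_linear_data(copy, ring_skb->data, pkt_len);
 *			skb_put(copy, pkt_len);
 *			netif_receive_skb(copy);	// ring_skb stays in the ring
 *		}
 *	} else {
 *		skb_put(ring_skb, pkt_len);
 *		netif_receive_skb(ring_skb);		// refill the ring slot later
 *	}
 */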


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT8251		= 0x7C,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
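
/*
 * Posted-write flush, illustrated (a sketch of the pattern; the actual call
 * sites are in the reset path below).  A write through a PCI bridge may be
 * buffered, so reading back any device register forces it out before we rely
 * on its effect:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;			// ioread8() flushes the posted iowrite8()
 */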

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

/* Registers for which we check that MMIO and PIO accesses agree. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
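
/*
 * The helpers above are read-modify-write shorthands.  For example
 * (a sketch; the real call sites are in the functions below):
 *
 *	// set the Tx threshold bits while leaving the rest of TxConfig alone
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *	// expands to roughly:
 *	//   iowrite8((ioread8(ioaddr + TxConfig) & ~0x80) | rp->tx_thresh,
 *	//            ioaddr + TxConfig);
 */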


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

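/*
 * Poll a register until the given mask bits go high or low.  Worst case this
 * busy-waits 1024 * udelay(10), i.e. roughly 10 ms; a debug message is logged
 * whenever more than 64 iterations (~640 us) were needed.
 */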
static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow	| \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

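/*
 * NAPI poll: acknowledge and service the "fast" events (Rx, Tx, stats)
 * inline, defer the "slow" ones (PCI error, link change) to slow_event_task,
 * and re-enable only the interrupt sources handled here once the budget was
 * not exhausted.
 */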
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
1027*4882a593Smuzhiyun 
rhine_init_one_pci(struct pci_dev * pdev,const struct pci_device_id * ent)1028*4882a593Smuzhiyun static int rhine_init_one_pci(struct pci_dev *pdev,
1029*4882a593Smuzhiyun 			      const struct pci_device_id *ent)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun 	struct device *hwdev = &pdev->dev;
1032*4882a593Smuzhiyun 	int rc;
1033*4882a593Smuzhiyun 	long pioaddr, memaddr;
1034*4882a593Smuzhiyun 	void __iomem *ioaddr;
1035*4882a593Smuzhiyun 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun /* This driver was written to use PCI memory space. Some early versions
1038*4882a593Smuzhiyun  * of the Rhine may only work correctly with I/O space accesses.
1039*4882a593Smuzhiyun  * TODO: determine for which revisions this is true and assign the flag
1040*4882a593Smuzhiyun  *	 in code as opposed to this Kconfig option (???)
1041*4882a593Smuzhiyun  */
1042*4882a593Smuzhiyun #ifdef CONFIG_VIA_RHINE_MMIO
1043*4882a593Smuzhiyun 	u32 quirks = rqNeedEnMMIO;
1044*4882a593Smuzhiyun #else
1045*4882a593Smuzhiyun 	u32 quirks = 0;
1046*4882a593Smuzhiyun #endif
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	rc = pci_enable_device(pdev);
1049*4882a593Smuzhiyun 	if (rc)
1050*4882a593Smuzhiyun 		goto err_out;
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	if (pdev->revision < VTunknown0) {
1053*4882a593Smuzhiyun 		quirks |= rqRhineI;
1054*4882a593Smuzhiyun 	} else if (pdev->revision >= VT6102) {
1055*4882a593Smuzhiyun 		quirks |= rqWOL | rqForceReset;
1056*4882a593Smuzhiyun 		if (pdev->revision < VT6105) {
1057*4882a593Smuzhiyun 			quirks |= rqStatusWBRace;
1058*4882a593Smuzhiyun 		} else {
1059*4882a593Smuzhiyun 			quirks |= rqIntPHY;
1060*4882a593Smuzhiyun 			if (pdev->revision >= VT6105_B0)
1061*4882a593Smuzhiyun 				quirks |= rq6patterns;
1062*4882a593Smuzhiyun 			if (pdev->revision >= VT6105M)
1063*4882a593Smuzhiyun 				quirks |= rqMgmt;
1064*4882a593Smuzhiyun 		}
1065*4882a593Smuzhiyun 	}
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* sanity check */
1068*4882a593Smuzhiyun 	if ((pci_resource_len(pdev, 0) < io_size) ||
1069*4882a593Smuzhiyun 	    (pci_resource_len(pdev, 1) < io_size)) {
1070*4882a593Smuzhiyun 		rc = -EIO;
1071*4882a593Smuzhiyun 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1072*4882a593Smuzhiyun 		goto err_out_pci_disable;
1073*4882a593Smuzhiyun 	}
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	pioaddr = pci_resource_start(pdev, 0);
1076*4882a593Smuzhiyun 	memaddr = pci_resource_start(pdev, 1);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	pci_set_master(pdev);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	rc = pci_request_regions(pdev, DRV_NAME);
1081*4882a593Smuzhiyun 	if (rc)
1082*4882a593Smuzhiyun 		goto err_out_pci_disable;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1085*4882a593Smuzhiyun 	if (!ioaddr) {
1086*4882a593Smuzhiyun 		rc = -EIO;
1087*4882a593Smuzhiyun 		dev_err(hwdev,
1088*4882a593Smuzhiyun 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1089*4882a593Smuzhiyun 			dev_name(hwdev), io_size, memaddr);
1090*4882a593Smuzhiyun 		goto err_out_free_res;
1091*4882a593Smuzhiyun 	}
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	enable_mmio(pioaddr, quirks);
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1096*4882a593Smuzhiyun 	if (rc)
1097*4882a593Smuzhiyun 		goto err_out_unmap;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	rc = rhine_init_one_common(&pdev->dev, quirks,
1100*4882a593Smuzhiyun 				   pioaddr, ioaddr, pdev->irq);
1101*4882a593Smuzhiyun 	if (!rc)
1102*4882a593Smuzhiyun 		return 0;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun err_out_unmap:
1105*4882a593Smuzhiyun 	pci_iounmap(pdev, ioaddr);
1106*4882a593Smuzhiyun err_out_free_res:
1107*4882a593Smuzhiyun 	pci_release_regions(pdev);
1108*4882a593Smuzhiyun err_out_pci_disable:
1109*4882a593Smuzhiyun 	pci_disable_device(pdev);
1110*4882a593Smuzhiyun err_out:
1111*4882a593Smuzhiyun 	return rc;
1112*4882a593Smuzhiyun }
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun static int rhine_init_one_platform(struct platform_device *pdev)
1115*4882a593Smuzhiyun {
1116*4882a593Smuzhiyun 	const struct of_device_id *match;
1117*4882a593Smuzhiyun 	const u32 *quirks;
1118*4882a593Smuzhiyun 	int irq;
1119*4882a593Smuzhiyun 	void __iomem *ioaddr;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	match = of_match_device(rhine_of_tbl, &pdev->dev);
1122*4882a593Smuzhiyun 	if (!match)
1123*4882a593Smuzhiyun 		return -EINVAL;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	ioaddr = devm_platform_ioremap_resource(pdev, 0);
1126*4882a593Smuzhiyun 	if (IS_ERR(ioaddr))
1127*4882a593Smuzhiyun 		return PTR_ERR(ioaddr);
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1130*4882a593Smuzhiyun 	if (!irq)
1131*4882a593Smuzhiyun 		return -EINVAL;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	quirks = match->data;
1134*4882a593Smuzhiyun 	if (!quirks)
1135*4882a593Smuzhiyun 		return -EINVAL;
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	return rhine_init_one_common(&pdev->dev, *quirks,
1138*4882a593Smuzhiyun 				     (long)ioaddr, ioaddr, irq);
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun static int alloc_ring(struct net_device* dev)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1144*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1145*4882a593Smuzhiyun 	void *ring;
1146*4882a593Smuzhiyun 	dma_addr_t ring_dma;
1147*4882a593Smuzhiyun 
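	/* Rx and Tx descriptor rings live in one shared coherent allocation;
	 * the Tx ring starts right after the Rx ring (see the pointer
	 * arithmetic below). */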
1148*4882a593Smuzhiyun 	ring = dma_alloc_coherent(hwdev,
1149*4882a593Smuzhiyun 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1150*4882a593Smuzhiyun 				  TX_RING_SIZE * sizeof(struct tx_desc),
1151*4882a593Smuzhiyun 				  &ring_dma,
1152*4882a593Smuzhiyun 				  GFP_ATOMIC);
1153*4882a593Smuzhiyun 	if (!ring) {
1154*4882a593Smuzhiyun 		netdev_err(dev, "Could not allocate DMA memory\n");
1155*4882a593Smuzhiyun 		return -ENOMEM;
1156*4882a593Smuzhiyun 	}
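	/* Rhine-I additionally needs a block of bounce buffers for the Tx
	 * alignment workaround used in rhine_start_tx(). */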
1157*4882a593Smuzhiyun 	if (rp->quirks & rqRhineI) {
1158*4882a593Smuzhiyun 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1159*4882a593Smuzhiyun 						 PKT_BUF_SZ * TX_RING_SIZE,
1160*4882a593Smuzhiyun 						 &rp->tx_bufs_dma,
1161*4882a593Smuzhiyun 						 GFP_ATOMIC);
1162*4882a593Smuzhiyun 		if (rp->tx_bufs == NULL) {
1163*4882a593Smuzhiyun 			dma_free_coherent(hwdev,
1164*4882a593Smuzhiyun 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1165*4882a593Smuzhiyun 					  TX_RING_SIZE * sizeof(struct tx_desc),
1166*4882a593Smuzhiyun 					  ring, ring_dma);
1167*4882a593Smuzhiyun 			return -ENOMEM;
1168*4882a593Smuzhiyun 		}
1169*4882a593Smuzhiyun 	}
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	rp->rx_ring = ring;
1172*4882a593Smuzhiyun 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1173*4882a593Smuzhiyun 	rp->rx_ring_dma = ring_dma;
1174*4882a593Smuzhiyun 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return 0;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun static void free_ring(struct net_device* dev)
1180*4882a593Smuzhiyun {
1181*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1182*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	dma_free_coherent(hwdev,
1185*4882a593Smuzhiyun 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1186*4882a593Smuzhiyun 			  TX_RING_SIZE * sizeof(struct tx_desc),
1187*4882a593Smuzhiyun 			  rp->rx_ring, rp->rx_ring_dma);
1188*4882a593Smuzhiyun 	rp->tx_ring = NULL;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	if (rp->tx_bufs)
1191*4882a593Smuzhiyun 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1192*4882a593Smuzhiyun 				  rp->tx_bufs, rp->tx_bufs_dma);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	rp->tx_bufs = NULL;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun struct rhine_skb_dma {
1199*4882a593Smuzhiyun 	struct sk_buff *skb;
1200*4882a593Smuzhiyun 	dma_addr_t dma;
1201*4882a593Smuzhiyun };
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun static inline int rhine_skb_dma_init(struct net_device *dev,
1204*4882a593Smuzhiyun 				     struct rhine_skb_dma *sd)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1207*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1208*4882a593Smuzhiyun 	const int size = rp->rx_buf_sz;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	sd->skb = netdev_alloc_skb(dev, size);
1211*4882a593Smuzhiyun 	if (!sd->skb)
1212*4882a593Smuzhiyun 		return -ENOMEM;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1215*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1216*4882a593Smuzhiyun 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1217*4882a593Smuzhiyun 		dev_kfree_skb_any(sd->skb);
1218*4882a593Smuzhiyun 		return -EIO;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	return 0;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun static void rhine_reset_rbufs(struct rhine_private *rp)
1225*4882a593Smuzhiyun {
1226*4882a593Smuzhiyun 	int i;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	rp->cur_rx = 0;
1229*4882a593Smuzhiyun 
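	/* Hand every Rx descriptor back to the NIC by setting DescOwn. */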
1230*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++)
1231*4882a593Smuzhiyun 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1235*4882a593Smuzhiyun 					   struct rhine_skb_dma *sd, int entry)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun 	rp->rx_skbuff_dma[entry] = sd->dma;
1238*4882a593Smuzhiyun 	rp->rx_skbuff[entry] = sd->skb;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1241*4882a593Smuzhiyun 	dma_wmb();
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun static void free_rbufs(struct net_device* dev);
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun static int alloc_rbufs(struct net_device *dev)
1247*4882a593Smuzhiyun {
1248*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1249*4882a593Smuzhiyun 	dma_addr_t next;
1250*4882a593Smuzhiyun 	int rc, i;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1253*4882a593Smuzhiyun 	next = rp->rx_ring_dma;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	/* Init the ring entries */
1256*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
1257*4882a593Smuzhiyun 		rp->rx_ring[i].rx_status = 0;
1258*4882a593Smuzhiyun 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1259*4882a593Smuzhiyun 		next += sizeof(struct rx_desc);
1260*4882a593Smuzhiyun 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1261*4882a593Smuzhiyun 		rp->rx_skbuff[i] = NULL;
1262*4882a593Smuzhiyun 	}
1263*4882a593Smuzhiyun 	/* Mark the last entry as wrapping the ring. */
1264*4882a593Smuzhiyun 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1267*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
1268*4882a593Smuzhiyun 		struct rhine_skb_dma sd;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 		rc = rhine_skb_dma_init(dev, &sd);
1271*4882a593Smuzhiyun 		if (rc < 0) {
1272*4882a593Smuzhiyun 			free_rbufs(dev);
1273*4882a593Smuzhiyun 			goto out;
1274*4882a593Smuzhiyun 		}
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 		rhine_skb_dma_nic_store(rp, &sd, i);
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	rhine_reset_rbufs(rp);
1280*4882a593Smuzhiyun out:
1281*4882a593Smuzhiyun 	return rc;
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun static void free_rbufs(struct net_device* dev)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1287*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1288*4882a593Smuzhiyun 	int i;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	/* Free all the skbuffs in the Rx queue. */
1291*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
1292*4882a593Smuzhiyun 		rp->rx_ring[i].rx_status = 0;
1293*4882a593Smuzhiyun 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1294*4882a593Smuzhiyun 		if (rp->rx_skbuff[i]) {
1295*4882a593Smuzhiyun 			dma_unmap_single(hwdev,
1296*4882a593Smuzhiyun 					 rp->rx_skbuff_dma[i],
1297*4882a593Smuzhiyun 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1298*4882a593Smuzhiyun 			dev_kfree_skb(rp->rx_skbuff[i]);
1299*4882a593Smuzhiyun 		}
1300*4882a593Smuzhiyun 		rp->rx_skbuff[i] = NULL;
1301*4882a593Smuzhiyun 	}
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun static void alloc_tbufs(struct net_device* dev)
1305*4882a593Smuzhiyun {
1306*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1307*4882a593Smuzhiyun 	dma_addr_t next;
1308*4882a593Smuzhiyun 	int i;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	rp->dirty_tx = rp->cur_tx = 0;
1311*4882a593Smuzhiyun 	next = rp->tx_ring_dma;
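	/* Chain the Tx descriptors together; on Rhine-I each slot also gets
	 * its bounce buffer assigned here. */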
1312*4882a593Smuzhiyun 	for (i = 0; i < TX_RING_SIZE; i++) {
1313*4882a593Smuzhiyun 		rp->tx_skbuff[i] = NULL;
1314*4882a593Smuzhiyun 		rp->tx_ring[i].tx_status = 0;
1315*4882a593Smuzhiyun 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1316*4882a593Smuzhiyun 		next += sizeof(struct tx_desc);
1317*4882a593Smuzhiyun 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1318*4882a593Smuzhiyun 		if (rp->quirks & rqRhineI)
1319*4882a593Smuzhiyun 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1320*4882a593Smuzhiyun 	}
1321*4882a593Smuzhiyun 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	netdev_reset_queue(dev);
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun static void free_tbufs(struct net_device* dev)
1327*4882a593Smuzhiyun {
1328*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1329*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1330*4882a593Smuzhiyun 	int i;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	for (i = 0; i < TX_RING_SIZE; i++) {
1333*4882a593Smuzhiyun 		rp->tx_ring[i].tx_status = 0;
1334*4882a593Smuzhiyun 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1335*4882a593Smuzhiyun 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1336*4882a593Smuzhiyun 		if (rp->tx_skbuff[i]) {
1337*4882a593Smuzhiyun 			if (rp->tx_skbuff_dma[i]) {
1338*4882a593Smuzhiyun 				dma_unmap_single(hwdev,
1339*4882a593Smuzhiyun 						 rp->tx_skbuff_dma[i],
1340*4882a593Smuzhiyun 						 rp->tx_skbuff[i]->len,
1341*4882a593Smuzhiyun 						 DMA_TO_DEVICE);
1342*4882a593Smuzhiyun 			}
1343*4882a593Smuzhiyun 			dev_kfree_skb(rp->tx_skbuff[i]);
1344*4882a593Smuzhiyun 		}
1345*4882a593Smuzhiyun 		rp->tx_skbuff[i] = NULL;
1346*4882a593Smuzhiyun 		rp->tx_buf[i] = NULL;
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1351*4882a593Smuzhiyun {
1352*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1353*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	if (!rp->mii_if.force_media)
1356*4882a593Smuzhiyun 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (rp->mii_if.full_duplex)
1359*4882a593Smuzhiyun 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1360*4882a593Smuzhiyun 		   ioaddr + ChipCmd1);
1361*4882a593Smuzhiyun 	else
1362*4882a593Smuzhiyun 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1363*4882a593Smuzhiyun 		   ioaddr + ChipCmd1);
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1366*4882a593Smuzhiyun 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun /* Called after status of force_media possibly changed */
1370*4882a593Smuzhiyun static void rhine_set_carrier(struct mii_if_info *mii)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun 	struct net_device *dev = mii->dev;
1373*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	if (mii->force_media) {
1376*4882a593Smuzhiyun 		/* autoneg is off: Link is always assumed to be up */
1377*4882a593Smuzhiyun 		if (!netif_carrier_ok(dev))
1378*4882a593Smuzhiyun 			netif_carrier_on(dev);
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	rhine_check_media(dev, 0);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1384*4882a593Smuzhiyun 		   mii->force_media, netif_carrier_ok(dev));
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun /**
1388*4882a593Smuzhiyun  * rhine_set_cam - set CAM multicast filters
1389*4882a593Smuzhiyun  * @ioaddr: register block of this Rhine
1390*4882a593Smuzhiyun  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1391*4882a593Smuzhiyun  * @addr: multicast address (6 bytes)
1392*4882a593Smuzhiyun  *
1393*4882a593Smuzhiyun  * Load addresses into multicast filters.
1394*4882a593Smuzhiyun  */
1395*4882a593Smuzhiyun static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	int i;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1400*4882a593Smuzhiyun 	wmb();
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	/* Paranoid -- idx out of range should never happen */
1403*4882a593Smuzhiyun 	idx &= (MCAM_SIZE - 1);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	iowrite8((u8) idx, ioaddr + CamAddr);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	for (i = 0; i < 6; i++, addr++)
1408*4882a593Smuzhiyun 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1409*4882a593Smuzhiyun 	udelay(10);
1410*4882a593Smuzhiyun 	wmb();
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1413*4882a593Smuzhiyun 	udelay(10);
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	iowrite8(0, ioaddr + CamCon);
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun /**
1419*4882a593Smuzhiyun  * rhine_set_vlan_cam - set CAM VLAN filters
1420*4882a593Smuzhiyun  * @ioaddr: register block of this Rhine
1421*4882a593Smuzhiyun  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1422*4882a593Smuzhiyun  * @addr: VLAN ID (2 bytes)
1423*4882a593Smuzhiyun  *
1424*4882a593Smuzhiyun  * Load addresses into VLAN filters.
1425*4882a593Smuzhiyun  */
1426*4882a593Smuzhiyun static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1427*4882a593Smuzhiyun {
1428*4882a593Smuzhiyun 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1429*4882a593Smuzhiyun 	wmb();
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	/* Paranoid -- idx out of range should never happen */
1432*4882a593Smuzhiyun 	idx &= (VCAM_SIZE - 1);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	iowrite8((u8) idx, ioaddr + CamAddr);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1437*4882a593Smuzhiyun 	udelay(10);
1438*4882a593Smuzhiyun 	wmb();
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1441*4882a593Smuzhiyun 	udelay(10);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	iowrite8(0, ioaddr + CamCon);
1444*4882a593Smuzhiyun }
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun /**
1447*4882a593Smuzhiyun  * rhine_set_cam_mask - set multicast CAM mask
1448*4882a593Smuzhiyun  * @ioaddr: register block of this Rhine
1449*4882a593Smuzhiyun  * @mask: multicast CAM mask
1450*4882a593Smuzhiyun  *
1451*4882a593Smuzhiyun  * Mask sets multicast filters active/inactive.
1452*4882a593Smuzhiyun  */
1453*4882a593Smuzhiyun static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1456*4882a593Smuzhiyun 	wmb();
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	/* write mask */
1459*4882a593Smuzhiyun 	iowrite32(mask, ioaddr + CamMask);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	/* disable CAMEN */
1462*4882a593Smuzhiyun 	iowrite8(0, ioaddr + CamCon);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun /**
1466*4882a593Smuzhiyun  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1467*4882a593Smuzhiyun  * @ioaddr: register block of this Rhine
1468*4882a593Smuzhiyun  * @mask: VLAN CAM mask
1469*4882a593Smuzhiyun  *
1470*4882a593Smuzhiyun  * Mask sets VLAN filters active/inactive.
1471*4882a593Smuzhiyun  */
1472*4882a593Smuzhiyun static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1475*4882a593Smuzhiyun 	wmb();
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	/* write mask */
1478*4882a593Smuzhiyun 	iowrite32(mask, ioaddr + CamMask);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/* disable CAMEN */
1481*4882a593Smuzhiyun 	iowrite8(0, ioaddr + CamCon);
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun /**
1485*4882a593Smuzhiyun  * rhine_init_cam_filter - initialize CAM filters
1486*4882a593Smuzhiyun  * @dev: network device
1487*4882a593Smuzhiyun  *
1488*4882a593Smuzhiyun  * Initialize (disable) hardware VLAN and multicast support on this
1489*4882a593Smuzhiyun  * Rhine.
1490*4882a593Smuzhiyun  */
1491*4882a593Smuzhiyun static void rhine_init_cam_filter(struct net_device *dev)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1494*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/* Disable all CAMs */
1497*4882a593Smuzhiyun 	rhine_set_vlan_cam_mask(ioaddr, 0);
1498*4882a593Smuzhiyun 	rhine_set_cam_mask(ioaddr, 0);
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	/* disable hardware VLAN support */
1501*4882a593Smuzhiyun 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1502*4882a593Smuzhiyun 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun /**
1506*4882a593Smuzhiyun  * rhine_update_vcam - update VLAN CAM filters
1507*4882a593Smuzhiyun  * @dev: network device whose VLAN CAM filters are updated
1508*4882a593Smuzhiyun  *
1509*4882a593Smuzhiyun  * Update VLAN CAM filters to match configuration change.
1510*4882a593Smuzhiyun  */
1511*4882a593Smuzhiyun static void rhine_update_vcam(struct net_device *dev)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1514*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1515*4882a593Smuzhiyun 	u16 vid;
1516*4882a593Smuzhiyun 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1517*4882a593Smuzhiyun 	unsigned int i = 0;
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1520*4882a593Smuzhiyun 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1521*4882a593Smuzhiyun 		vCAMmask |= 1 << i;
1522*4882a593Smuzhiyun 		if (++i >= VCAM_SIZE)
1523*4882a593Smuzhiyun 			break;
1524*4882a593Smuzhiyun 	}
1525*4882a593Smuzhiyun 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	spin_lock_bh(&rp->lock);
1533*4882a593Smuzhiyun 	set_bit(vid, rp->active_vlans);
1534*4882a593Smuzhiyun 	rhine_update_vcam(dev);
1535*4882a593Smuzhiyun 	spin_unlock_bh(&rp->lock);
1536*4882a593Smuzhiyun 	return 0;
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	spin_lock_bh(&rp->lock);
1544*4882a593Smuzhiyun 	clear_bit(vid, rp->active_vlans);
1545*4882a593Smuzhiyun 	rhine_update_vcam(dev);
1546*4882a593Smuzhiyun 	spin_unlock_bh(&rp->lock);
1547*4882a593Smuzhiyun 	return 0;
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun static void init_registers(struct net_device *dev)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1553*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1554*4882a593Smuzhiyun 	int i;
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	for (i = 0; i < 6; i++)
1557*4882a593Smuzhiyun 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	/* Initialize other registers. */
1560*4882a593Smuzhiyun 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1561*4882a593Smuzhiyun 	/* Configure initial FIFO thresholds. */
1562*4882a593Smuzhiyun 	iowrite8(0x20, ioaddr + TxConfig);
1563*4882a593Smuzhiyun 	rp->tx_thresh = 0x20;
1564*4882a593Smuzhiyun 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1567*4882a593Smuzhiyun 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	rhine_set_rx_mode(dev);
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	if (rp->quirks & rqMgmt)
1572*4882a593Smuzhiyun 		rhine_init_cam_filter(dev);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	napi_enable(&rp->napi);
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1579*4882a593Smuzhiyun 	       ioaddr + ChipCmd);
1580*4882a593Smuzhiyun 	rhine_check_media(dev, 1);
1581*4882a593Smuzhiyun }
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun /* Enable MII link status auto-polling (required for IntrLinkChange) */
1584*4882a593Smuzhiyun static void rhine_enable_linkmon(struct rhine_private *rp)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1587*4882a593Smuzhiyun 
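	/* Select BMSR in MIIRegAddr and start polling it (MIICmd 0x80); once
	 * the chip reports ready (MIIRegAddr bit 0x20), bit 0x40 presumably
	 * keeps the link-status poll running continuously. */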
1588*4882a593Smuzhiyun 	iowrite8(0, ioaddr + MIICmd);
1589*4882a593Smuzhiyun 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1590*4882a593Smuzhiyun 	iowrite8(0x80, ioaddr + MIICmd);
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun /* Disable MII link status auto-polling (required for MDIO access) */
1598*4882a593Smuzhiyun static void rhine_disable_linkmon(struct rhine_private *rp)
1599*4882a593Smuzhiyun {
1600*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	iowrite8(0, ioaddr + MIICmd);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (rp->quirks & rqRhineI) {
1605*4882a593Smuzhiyun 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		/* Can be called from ISR. Evil. */
1608*4882a593Smuzhiyun 		mdelay(1);
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 		/* 0x80 must be set immediately before turning it off */
1611*4882a593Smuzhiyun 		iowrite8(0x80, ioaddr + MIICmd);
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 		/* Heh. Now clear 0x80 again. */
1616*4882a593Smuzhiyun 		iowrite8(0, ioaddr + MIICmd);
1617*4882a593Smuzhiyun 	}
1618*4882a593Smuzhiyun 	else
1619*4882a593Smuzhiyun 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun /* Read and write over the MII Management Data I/O (MDIO) interface. */
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1627*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1628*4882a593Smuzhiyun 	int result;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	rhine_disable_linkmon(rp);
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	/* rhine_disable_linkmon already cleared MIICmd */
1633*4882a593Smuzhiyun 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1634*4882a593Smuzhiyun 	iowrite8(regnum, ioaddr + MIIRegAddr);
1635*4882a593Smuzhiyun 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1636*4882a593Smuzhiyun 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1637*4882a593Smuzhiyun 	result = ioread16(ioaddr + MIIData);
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 	rhine_enable_linkmon(rp);
1640*4882a593Smuzhiyun 	return result;
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1646*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	rhine_disable_linkmon(rp);
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	/* rhine_disable_linkmon already cleared MIICmd */
1651*4882a593Smuzhiyun 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1652*4882a593Smuzhiyun 	iowrite8(regnum, ioaddr + MIIRegAddr);
1653*4882a593Smuzhiyun 	iowrite16(value, ioaddr + MIIData);
1654*4882a593Smuzhiyun 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1655*4882a593Smuzhiyun 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	rhine_enable_linkmon(rp);
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun static void rhine_task_disable(struct rhine_private *rp)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
1663*4882a593Smuzhiyun 	rp->task_enable = false;
1664*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	cancel_work_sync(&rp->slow_event_task);
1667*4882a593Smuzhiyun 	cancel_work_sync(&rp->reset_task);
1668*4882a593Smuzhiyun }
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun static void rhine_task_enable(struct rhine_private *rp)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
1673*4882a593Smuzhiyun 	rp->task_enable = true;
1674*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
1675*4882a593Smuzhiyun }
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun static int rhine_open(struct net_device *dev)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1680*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1681*4882a593Smuzhiyun 	int rc;
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1684*4882a593Smuzhiyun 	if (rc)
1685*4882a593Smuzhiyun 		goto out;
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	rc = alloc_ring(dev);
1690*4882a593Smuzhiyun 	if (rc < 0)
1691*4882a593Smuzhiyun 		goto out_free_irq;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	rc = alloc_rbufs(dev);
1694*4882a593Smuzhiyun 	if (rc < 0)
1695*4882a593Smuzhiyun 		goto out_free_ring;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	alloc_tbufs(dev);
1698*4882a593Smuzhiyun 	enable_mmio(rp->pioaddr, rp->quirks);
1699*4882a593Smuzhiyun 	rhine_power_init(dev);
1700*4882a593Smuzhiyun 	rhine_chip_reset(dev);
1701*4882a593Smuzhiyun 	rhine_task_enable(rp);
1702*4882a593Smuzhiyun 	init_registers(dev);
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1705*4882a593Smuzhiyun 		  __func__, ioread16(ioaddr + ChipCmd),
1706*4882a593Smuzhiyun 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	netif_start_queue(dev);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun out:
1711*4882a593Smuzhiyun 	return rc;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun out_free_ring:
1714*4882a593Smuzhiyun 	free_ring(dev);
1715*4882a593Smuzhiyun out_free_irq:
1716*4882a593Smuzhiyun 	free_irq(rp->irq, dev);
1717*4882a593Smuzhiyun 	goto out;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun static void rhine_reset_task(struct work_struct *work)
1721*4882a593Smuzhiyun {
1722*4882a593Smuzhiyun 	struct rhine_private *rp = container_of(work, struct rhine_private,
1723*4882a593Smuzhiyun 						reset_task);
1724*4882a593Smuzhiyun 	struct net_device *dev = rp->dev;
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	if (!rp->task_enable)
1729*4882a593Smuzhiyun 		goto out_unlock;
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	napi_disable(&rp->napi);
1732*4882a593Smuzhiyun 	netif_tx_disable(dev);
1733*4882a593Smuzhiyun 	spin_lock_bh(&rp->lock);
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	/* clear all descriptors */
1736*4882a593Smuzhiyun 	free_tbufs(dev);
1737*4882a593Smuzhiyun 	alloc_tbufs(dev);
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	rhine_reset_rbufs(rp);
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	/* Reinitialize the hardware. */
1742*4882a593Smuzhiyun 	rhine_chip_reset(dev);
1743*4882a593Smuzhiyun 	init_registers(dev);
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	spin_unlock_bh(&rp->lock);
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	netif_trans_update(dev); /* prevent tx timeout */
1748*4882a593Smuzhiyun 	dev->stats.tx_errors++;
1749*4882a593Smuzhiyun 	netif_wake_queue(dev);
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun out_unlock:
1752*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1756*4882a593Smuzhiyun {
1757*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1758*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1761*4882a593Smuzhiyun 		    ioread16(ioaddr + IntrStatus),
1762*4882a593Smuzhiyun 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun 	schedule_work(&rp->reset_task);
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1770*4882a593Smuzhiyun }
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1773*4882a593Smuzhiyun 				  struct net_device *dev)
1774*4882a593Smuzhiyun {
1775*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1776*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1777*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
1778*4882a593Smuzhiyun 	unsigned entry;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	/* Caution: the write order is important here, set the field
1781*4882a593Smuzhiyun 	   with the "ownership" bits last. */
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	/* Calculate the next Tx descriptor entry. */
1784*4882a593Smuzhiyun 	entry = rp->cur_tx % TX_RING_SIZE;
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	if (skb_padto(skb, ETH_ZLEN))
1787*4882a593Smuzhiyun 		return NETDEV_TX_OK;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	rp->tx_skbuff[entry] = skb;
1790*4882a593Smuzhiyun 
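	/* On Rhine-I, unaligned, fragmented or checksum-offload packets go
	 * through the pre-allocated aligned bounce buffer instead of being
	 * DMA-mapped directly. */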
1791*4882a593Smuzhiyun 	if ((rp->quirks & rqRhineI) &&
1792*4882a593Smuzhiyun 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1793*4882a593Smuzhiyun 		/* Must use alignment buffer. */
1794*4882a593Smuzhiyun 		if (skb->len > PKT_BUF_SZ) {
1795*4882a593Smuzhiyun 			/* packet too long, drop it */
1796*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
1797*4882a593Smuzhiyun 			rp->tx_skbuff[entry] = NULL;
1798*4882a593Smuzhiyun 			dev->stats.tx_dropped++;
1799*4882a593Smuzhiyun 			return NETDEV_TX_OK;
1800*4882a593Smuzhiyun 		}
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 		/* Padding is not copied and so must be redone. */
1803*4882a593Smuzhiyun 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1804*4882a593Smuzhiyun 		if (skb->len < ETH_ZLEN)
1805*4882a593Smuzhiyun 			memset(rp->tx_buf[entry] + skb->len, 0,
1806*4882a593Smuzhiyun 			       ETH_ZLEN - skb->len);
1807*4882a593Smuzhiyun 		rp->tx_skbuff_dma[entry] = 0;
1808*4882a593Smuzhiyun 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1809*4882a593Smuzhiyun 						      (rp->tx_buf[entry] -
1810*4882a593Smuzhiyun 						       rp->tx_bufs));
1811*4882a593Smuzhiyun 	} else {
1812*4882a593Smuzhiyun 		rp->tx_skbuff_dma[entry] =
1813*4882a593Smuzhiyun 			dma_map_single(hwdev, skb->data, skb->len,
1814*4882a593Smuzhiyun 				       DMA_TO_DEVICE);
1815*4882a593Smuzhiyun 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1816*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
1817*4882a593Smuzhiyun 			rp->tx_skbuff_dma[entry] = 0;
1818*4882a593Smuzhiyun 			dev->stats.tx_dropped++;
1819*4882a593Smuzhiyun 			return NETDEV_TX_OK;
1820*4882a593Smuzhiyun 		}
1821*4882a593Smuzhiyun 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1822*4882a593Smuzhiyun 	}
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	rp->tx_ring[entry].desc_length =
1825*4882a593Smuzhiyun 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	if (unlikely(skb_vlan_tag_present(skb))) {
1828*4882a593Smuzhiyun 		u16 vid_pcp = skb_vlan_tag_get(skb);
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 		/* drop CFI/DEI bit, register needs VID and PCP */
1831*4882a593Smuzhiyun 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1832*4882a593Smuzhiyun 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1833*4882a593Smuzhiyun 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1834*4882a593Smuzhiyun 		/* request tagging */
1835*4882a593Smuzhiyun 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 	else
1838*4882a593Smuzhiyun 		rp->tx_ring[entry].tx_status = 0;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	netdev_sent_queue(dev, skb->len);
1841*4882a593Smuzhiyun 	/* Make the descriptor contents visible to the NIC before granting ownership. */
1842*4882a593Smuzhiyun 	dma_wmb();
1843*4882a593Smuzhiyun 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1844*4882a593Smuzhiyun 	wmb();
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	rp->cur_tx++;
1847*4882a593Smuzhiyun 	/*
1848*4882a593Smuzhiyun 	 * Do not let the cur_tx update linger long after the NIC has seen
1849*4882a593Smuzhiyun 	 * the transmit request; the transmit completion handler could
1850*4882a593Smuzhiyun 	 * otherwise miss it.
1851*4882a593Smuzhiyun 	 */
1852*4882a593Smuzhiyun 	smp_wmb();
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	/* Non-x86 Todo: explicitly flush cache lines here. */
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb))
1857*4882a593Smuzhiyun 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1858*4882a593Smuzhiyun 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	/* Wake the potentially-idle transmit channel */
1861*4882a593Smuzhiyun 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1862*4882a593Smuzhiyun 	       ioaddr + ChipCmd1);
1863*4882a593Smuzhiyun 	IOSYNC;
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun 	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
1866*4882a593Smuzhiyun 	if (rhine_tx_queue_full(rp)) {
1867*4882a593Smuzhiyun 		netif_stop_queue(dev);
1868*4882a593Smuzhiyun 		smp_rmb();
1869*4882a593Smuzhiyun 		/* Rejuvenate. */
1870*4882a593Smuzhiyun 		if (!rhine_tx_queue_full(rp))
1871*4882a593Smuzhiyun 			netif_wake_queue(dev);
1872*4882a593Smuzhiyun 	}
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1875*4882a593Smuzhiyun 		  rp->cur_tx - 1, entry);
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun static void rhine_irq_disable(struct rhine_private *rp)
1881*4882a593Smuzhiyun {
1882*4882a593Smuzhiyun 	iowrite16(0x0000, rp->base + IntrEnable);
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun /* The interrupt handler does all of the Rx thread work and cleans up
1886*4882a593Smuzhiyun    after the Tx thread. */
1887*4882a593Smuzhiyun static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1888*4882a593Smuzhiyun {
1889*4882a593Smuzhiyun 	struct net_device *dev = dev_instance;
1890*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1891*4882a593Smuzhiyun 	u32 status;
1892*4882a593Smuzhiyun 	int handled = 0;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	status = rhine_get_events(rp);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	if (status & RHINE_EVENT) {
1899*4882a593Smuzhiyun 		handled = 1;
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 		rhine_irq_disable(rp);
1902*4882a593Smuzhiyun 		napi_schedule(&rp->napi);
1903*4882a593Smuzhiyun 	}
1904*4882a593Smuzhiyun 
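	/* Anything beyond the NAPI-handled events and the routine link/stats
	 * interrupts is unexpected and worth logging. */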
1905*4882a593Smuzhiyun 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1906*4882a593Smuzhiyun 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1907*4882a593Smuzhiyun 			  status);
1908*4882a593Smuzhiyun 	}
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	return IRQ_RETVAL(handled);
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun /* This routine is logically part of the interrupt handler, but isolated
1914*4882a593Smuzhiyun    for clarity. */
1915*4882a593Smuzhiyun static void rhine_tx(struct net_device *dev)
1916*4882a593Smuzhiyun {
1917*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
1918*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
1919*4882a593Smuzhiyun 	unsigned int pkts_compl = 0, bytes_compl = 0;
1920*4882a593Smuzhiyun 	unsigned int dirty_tx = rp->dirty_tx;
1921*4882a593Smuzhiyun 	unsigned int cur_tx;
1922*4882a593Smuzhiyun 	struct sk_buff *skb;
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	/*
1925*4882a593Smuzhiyun 	 * The race with rhine_start_tx does not matter here as long as the
1926*4882a593Smuzhiyun 	 * driver enforces a value of cur_tx that was relevant when the
1927*4882a593Smuzhiyun 	 * packet was scheduled to the network chipset.
1928*4882a593Smuzhiyun 	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
1929*4882a593Smuzhiyun 	 */
1930*4882a593Smuzhiyun 	smp_rmb();
1931*4882a593Smuzhiyun 	cur_tx = rp->cur_tx;
1932*4882a593Smuzhiyun 	/* find and cleanup dirty tx descriptors */
1933*4882a593Smuzhiyun 	while (dirty_tx != cur_tx) {
1934*4882a593Smuzhiyun 		unsigned int entry = dirty_tx % TX_RING_SIZE;
1935*4882a593Smuzhiyun 		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1938*4882a593Smuzhiyun 			  entry, txstatus);
1939*4882a593Smuzhiyun 		if (txstatus & DescOwn)
1940*4882a593Smuzhiyun 			break;
1941*4882a593Smuzhiyun 		skb = rp->tx_skbuff[entry];
1942*4882a593Smuzhiyun 		if (txstatus & 0x8000) {
1943*4882a593Smuzhiyun 			netif_dbg(rp, tx_done, dev,
1944*4882a593Smuzhiyun 				  "Transmit error, Tx status %08x\n", txstatus);
1945*4882a593Smuzhiyun 			dev->stats.tx_errors++;
1946*4882a593Smuzhiyun 			if (txstatus & 0x0400)
1947*4882a593Smuzhiyun 				dev->stats.tx_carrier_errors++;
1948*4882a593Smuzhiyun 			if (txstatus & 0x0200)
1949*4882a593Smuzhiyun 				dev->stats.tx_window_errors++;
1950*4882a593Smuzhiyun 			if (txstatus & 0x0100)
1951*4882a593Smuzhiyun 				dev->stats.tx_aborted_errors++;
1952*4882a593Smuzhiyun 			if (txstatus & 0x0080)
1953*4882a593Smuzhiyun 				dev->stats.tx_heartbeat_errors++;
1954*4882a593Smuzhiyun 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1955*4882a593Smuzhiyun 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1956*4882a593Smuzhiyun 				dev->stats.tx_fifo_errors++;
1957*4882a593Smuzhiyun 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1958*4882a593Smuzhiyun 				break; /* Keep the skb - we try again */
1959*4882a593Smuzhiyun 			}
1960*4882a593Smuzhiyun 			/* Transmitter restarted in 'abnormal' handler. */
1961*4882a593Smuzhiyun 		} else {
1962*4882a593Smuzhiyun 			if (rp->quirks & rqRhineI)
1963*4882a593Smuzhiyun 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1964*4882a593Smuzhiyun 			else
1965*4882a593Smuzhiyun 				dev->stats.collisions += txstatus & 0x0F;
1966*4882a593Smuzhiyun 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1967*4882a593Smuzhiyun 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 			u64_stats_update_begin(&rp->tx_stats.syncp);
1970*4882a593Smuzhiyun 			rp->tx_stats.bytes += skb->len;
1971*4882a593Smuzhiyun 			rp->tx_stats.packets++;
1972*4882a593Smuzhiyun 			u64_stats_update_end(&rp->tx_stats.syncp);
1973*4882a593Smuzhiyun 		}
1974*4882a593Smuzhiyun 		/* Free the original skb. */
1975*4882a593Smuzhiyun 		if (rp->tx_skbuff_dma[entry]) {
1976*4882a593Smuzhiyun 			dma_unmap_single(hwdev,
1977*4882a593Smuzhiyun 					 rp->tx_skbuff_dma[entry],
1978*4882a593Smuzhiyun 					 skb->len,
1979*4882a593Smuzhiyun 					 DMA_TO_DEVICE);
1980*4882a593Smuzhiyun 		}
1981*4882a593Smuzhiyun 		bytes_compl += skb->len;
1982*4882a593Smuzhiyun 		pkts_compl++;
1983*4882a593Smuzhiyun 		dev_consume_skb_any(skb);
1984*4882a593Smuzhiyun 		rp->tx_skbuff[entry] = NULL;
1985*4882a593Smuzhiyun 		dirty_tx++;
1986*4882a593Smuzhiyun 	}
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	rp->dirty_tx = dirty_tx;
1989*4882a593Smuzhiyun 	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
1990*4882a593Smuzhiyun 	smp_wmb();
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
1995*4882a593Smuzhiyun 	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
1996*4882a593Smuzhiyun 		netif_wake_queue(dev);
1997*4882a593Smuzhiyun 		smp_rmb();
1998*4882a593Smuzhiyun 		/* Rejuvenate. */
1999*4882a593Smuzhiyun 		if (rhine_tx_queue_full(rp))
2000*4882a593Smuzhiyun 			netif_stop_queue(dev);
2001*4882a593Smuzhiyun 	}
2002*4882a593Smuzhiyun }
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun /**
2005*4882a593Smuzhiyun  * rhine_get_vlan_tci - extract TCI from Rx data buffer
2006*4882a593Smuzhiyun  * @skb: pointer to sk_buff
2007*4882a593Smuzhiyun  * @data_size: used data area of the buffer including CRC
2008*4882a593Smuzhiyun  *
2009*4882a593Smuzhiyun  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2010*4882a593Smuzhiyun  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2011*4882a593Smuzhiyun  * aligned following the CRC.
2012*4882a593Smuzhiyun  */
2013*4882a593Smuzhiyun static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2014*4882a593Smuzhiyun {
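	/* The used data area is rounded up to a 4-byte boundary; skip the
	 * 2-byte TPID that follows to land on the TCI. */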
2015*4882a593Smuzhiyun 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2016*4882a593Smuzhiyun 	return be16_to_cpup((__be16 *)trailer);
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2020*4882a593Smuzhiyun 				     int data_size)
2021*4882a593Smuzhiyun {
2022*4882a593Smuzhiyun 	dma_rmb();
2023*4882a593Smuzhiyun 	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2024*4882a593Smuzhiyun 		u16 vlan_tci;
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2027*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun /* Process up to limit frames from receive ring */
2032*4882a593Smuzhiyun static int rhine_rx(struct net_device *dev, int limit)
2033*4882a593Smuzhiyun {
2034*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2035*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
2036*4882a593Smuzhiyun 	int entry = rp->cur_rx % RX_RING_SIZE;
2037*4882a593Smuzhiyun 	int count;
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2040*4882a593Smuzhiyun 		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 	/* Process descriptors the NIC has handed back (DescOwn clear), up to the limit. */
2043*4882a593Smuzhiyun 	for (count = 0; count < limit; ++count) {
2044*4882a593Smuzhiyun 		struct rx_desc *desc = rp->rx_ring + entry;
2045*4882a593Smuzhiyun 		u32 desc_status = le32_to_cpu(desc->rx_status);
2046*4882a593Smuzhiyun 		int data_size = desc_status >> 16;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 		if (desc_status & DescOwn)
2049*4882a593Smuzhiyun 			break;
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2052*4882a593Smuzhiyun 			  desc_status);
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2055*4882a593Smuzhiyun 			if ((desc_status & RxWholePkt) != RxWholePkt) {
2056*4882a593Smuzhiyun 				netdev_warn(dev,
2057*4882a593Smuzhiyun 	"Oversized Ethernet frame spanned multiple buffers, "
2058*4882a593Smuzhiyun 	"entry %#x length %d status %08x!\n",
2059*4882a593Smuzhiyun 					    entry, data_size,
2060*4882a593Smuzhiyun 					    desc_status);
2061*4882a593Smuzhiyun 				dev->stats.rx_length_errors++;
2062*4882a593Smuzhiyun 			} else if (desc_status & RxErr) {
2063*4882a593Smuzhiyun 				/* There was an error. */
2064*4882a593Smuzhiyun 				netif_dbg(rp, rx_err, dev,
2065*4882a593Smuzhiyun 					  "%s() Rx error %08x\n", __func__,
2066*4882a593Smuzhiyun 					  desc_status);
2067*4882a593Smuzhiyun 				dev->stats.rx_errors++;
2068*4882a593Smuzhiyun 				if (desc_status & 0x0030)
2069*4882a593Smuzhiyun 					dev->stats.rx_length_errors++;
2070*4882a593Smuzhiyun 				if (desc_status & 0x0048)
2071*4882a593Smuzhiyun 					dev->stats.rx_fifo_errors++;
2072*4882a593Smuzhiyun 				if (desc_status & 0x0004)
2073*4882a593Smuzhiyun 					dev->stats.rx_frame_errors++;
2074*4882a593Smuzhiyun 				if (desc_status & 0x0002) {
2075*4882a593Smuzhiyun 					/* this can also be updated outside the interrupt handler */
2076*4882a593Smuzhiyun 					spin_lock(&rp->lock);
2077*4882a593Smuzhiyun 					dev->stats.rx_crc_errors++;
2078*4882a593Smuzhiyun 					spin_unlock(&rp->lock);
2079*4882a593Smuzhiyun 				}
2080*4882a593Smuzhiyun 			}
2081*4882a593Smuzhiyun 		} else {
2082*4882a593Smuzhiyun 			/* Length should omit the CRC */
2083*4882a593Smuzhiyun 			int pkt_len = data_size - 4;
2084*4882a593Smuzhiyun 			struct sk_buff *skb;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 			/* Check if the packet is long enough to accept without
2087*4882a593Smuzhiyun 			   copying to a minimally-sized skbuff. */
2088*4882a593Smuzhiyun 			if (pkt_len < rx_copybreak) {
2089*4882a593Smuzhiyun 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2090*4882a593Smuzhiyun 				if (unlikely(!skb))
2091*4882a593Smuzhiyun 					goto drop;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 				dma_sync_single_for_cpu(hwdev,
2094*4882a593Smuzhiyun 							rp->rx_skbuff_dma[entry],
2095*4882a593Smuzhiyun 							rp->rx_buf_sz,
2096*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 				skb_copy_to_linear_data(skb,
2099*4882a593Smuzhiyun 						 rp->rx_skbuff[entry]->data,
2100*4882a593Smuzhiyun 						 pkt_len);
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 				dma_sync_single_for_device(hwdev,
2103*4882a593Smuzhiyun 							   rp->rx_skbuff_dma[entry],
2104*4882a593Smuzhiyun 							   rp->rx_buf_sz,
2105*4882a593Smuzhiyun 							   DMA_FROM_DEVICE);
2106*4882a593Smuzhiyun 			} else {
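				/* Large frame: map a fresh replacement buffer
				 * first, then hand the original full-sized
				 * buffer up the stack. */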
2107*4882a593Smuzhiyun 				struct rhine_skb_dma sd;
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2110*4882a593Smuzhiyun 					goto drop;
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 				skb = rp->rx_skbuff[entry];
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 				dma_unmap_single(hwdev,
2115*4882a593Smuzhiyun 						 rp->rx_skbuff_dma[entry],
2116*4882a593Smuzhiyun 						 rp->rx_buf_sz,
2117*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
2118*4882a593Smuzhiyun 				rhine_skb_dma_nic_store(rp, &sd, entry);
2119*4882a593Smuzhiyun 			}
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 			skb_put(skb, pkt_len);
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 			rhine_rx_vlan_tag(skb, desc, data_size);
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 			skb->protocol = eth_type_trans(skb, dev);
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 			netif_receive_skb(skb);
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 			u64_stats_update_begin(&rp->rx_stats.syncp);
2130*4882a593Smuzhiyun 			rp->rx_stats.bytes += pkt_len;
2131*4882a593Smuzhiyun 			rp->rx_stats.packets++;
2132*4882a593Smuzhiyun 			u64_stats_update_end(&rp->rx_stats.syncp);
2133*4882a593Smuzhiyun 		}
2134*4882a593Smuzhiyun give_descriptor_to_nic:
2135*4882a593Smuzhiyun 		desc->rx_status = cpu_to_le32(DescOwn);
2136*4882a593Smuzhiyun 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	return count;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun drop:
2142*4882a593Smuzhiyun 	dev->stats.rx_dropped++;
2143*4882a593Smuzhiyun 	goto give_descriptor_to_nic;
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun static void rhine_restart_tx(struct net_device *dev) {
2147*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2148*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
2149*4882a593Smuzhiyun 	int entry = rp->dirty_tx % TX_RING_SIZE;
2150*4882a593Smuzhiyun 	u32 intr_status;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	/*
2153*4882a593Smuzhiyun 	 * If new errors occurred, we need to sort them out before doing Tx.
2154*4882a593Smuzhiyun 	 * In that case the ISR will be back here RSN anyway.
2155*4882a593Smuzhiyun 	 */
2156*4882a593Smuzhiyun 	intr_status = rhine_get_events(rp);
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	if ((intr_status & IntrTxErrSummary) == 0) {
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 		/* We know better than the chip where it should continue. */
2161*4882a593Smuzhiyun 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2162*4882a593Smuzhiyun 		       ioaddr + TxRingPtr);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2165*4882a593Smuzhiyun 		       ioaddr + ChipCmd);
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2168*4882a593Smuzhiyun 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2169*4882a593Smuzhiyun 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2172*4882a593Smuzhiyun 		       ioaddr + ChipCmd1);
2173*4882a593Smuzhiyun 		IOSYNC;
2174*4882a593Smuzhiyun 	}
2175*4882a593Smuzhiyun 	else {
2176*4882a593Smuzhiyun 		/* This should never happen */
2177*4882a593Smuzhiyun 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2178*4882a593Smuzhiyun 			   intr_status);
2179*4882a593Smuzhiyun 	}
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun }
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun static void rhine_slow_event_task(struct work_struct *work)
2184*4882a593Smuzhiyun {
2185*4882a593Smuzhiyun 	struct rhine_private *rp =
2186*4882a593Smuzhiyun 		container_of(work, struct rhine_private, slow_event_task);
2187*4882a593Smuzhiyun 	struct net_device *dev = rp->dev;
2188*4882a593Smuzhiyun 	u32 intr_status;
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	if (!rp->task_enable)
2193*4882a593Smuzhiyun 		goto out_unlock;
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	intr_status = rhine_get_events(rp);
2196*4882a593Smuzhiyun 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 	if (intr_status & IntrLinkChange)
2199*4882a593Smuzhiyun 		rhine_check_media(dev, 0);
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	if (intr_status & IntrPCIErr)
2202*4882a593Smuzhiyun 		netif_warn(rp, hw, dev, "PCI error\n");
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun out_unlock:
2207*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
2208*4882a593Smuzhiyun }
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun static void
2211*4882a593Smuzhiyun rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2212*4882a593Smuzhiyun {
2213*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2214*4882a593Smuzhiyun 	unsigned int start;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	spin_lock_bh(&rp->lock);
2217*4882a593Smuzhiyun 	rhine_update_rx_crc_and_missed_errord(rp);
2218*4882a593Smuzhiyun 	spin_unlock_bh(&rp->lock);
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	netdev_stats_to_stats64(stats, &dev->stats);
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	do {
2223*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2224*4882a593Smuzhiyun 		stats->rx_packets = rp->rx_stats.packets;
2225*4882a593Smuzhiyun 		stats->rx_bytes = rp->rx_stats.bytes;
2226*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 	do {
2229*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2230*4882a593Smuzhiyun 		stats->tx_packets = rp->tx_stats.packets;
2231*4882a593Smuzhiyun 		stats->tx_bytes = rp->tx_stats.bytes;
2232*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2233*4882a593Smuzhiyun }
2234*4882a593Smuzhiyun 
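/*
 * Program the receive filter.  Promiscuous mode accepts everything and opens
 * the multicast hash completely; too many multicast addresses (or
 * IFF_ALLMULTI) likewise opens the hash; management chips (rqMgmt) load the
 * addresses into the multicast CAM instead; otherwise the addresses are
 * hashed into the 64-bit MulticastFilter registers.  VLAN receive filtering
 * is only toggled on management chips.
 */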
2235*4882a593Smuzhiyun static void rhine_set_rx_mode(struct net_device *dev)
2236*4882a593Smuzhiyun {
2237*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2238*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
2239*4882a593Smuzhiyun 	u32 mc_filter[2];	/* Multicast hash filter */
2240*4882a593Smuzhiyun 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2241*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2244*4882a593Smuzhiyun 		rx_mode = 0x1C;
2245*4882a593Smuzhiyun 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2246*4882a593Smuzhiyun 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2247*4882a593Smuzhiyun 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2248*4882a593Smuzhiyun 		   (dev->flags & IFF_ALLMULTI)) {
2249*4882a593Smuzhiyun 		/* Too many to match, or accept all multicasts. */
2250*4882a593Smuzhiyun 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2251*4882a593Smuzhiyun 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2252*4882a593Smuzhiyun 	} else if (rp->quirks & rqMgmt) {
2253*4882a593Smuzhiyun 		int i = 0;
2254*4882a593Smuzhiyun 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2255*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, dev) {
2256*4882a593Smuzhiyun 			if (i == MCAM_SIZE)
2257*4882a593Smuzhiyun 				break;
2258*4882a593Smuzhiyun 			rhine_set_cam(ioaddr, i, ha->addr);
2259*4882a593Smuzhiyun 			mCAMmask |= 1 << i;
2260*4882a593Smuzhiyun 			i++;
2261*4882a593Smuzhiyun 		}
2262*4882a593Smuzhiyun 		rhine_set_cam_mask(ioaddr, mCAMmask);
2263*4882a593Smuzhiyun 	} else {
2264*4882a593Smuzhiyun 		memset(mc_filter, 0, sizeof(mc_filter));
2265*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, dev) {
2266*4882a593Smuzhiyun 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2269*4882a593Smuzhiyun 		}
2270*4882a593Smuzhiyun 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2271*4882a593Smuzhiyun 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2272*4882a593Smuzhiyun 	}
2273*4882a593Smuzhiyun 	/* enable/disable VLAN receive filtering */
2274*4882a593Smuzhiyun 	if (rp->quirks & rqMgmt) {
2275*4882a593Smuzhiyun 		if (dev->flags & IFF_PROMISC)
2276*4882a593Smuzhiyun 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2277*4882a593Smuzhiyun 		else
2278*4882a593Smuzhiyun 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2279*4882a593Smuzhiyun 	}
2280*4882a593Smuzhiyun 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2284*4882a593Smuzhiyun {
2285*4882a593Smuzhiyun 	struct device *hwdev = dev->dev.parent;
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2288*4882a593Smuzhiyun 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun static int netdev_get_link_ksettings(struct net_device *dev,
2292*4882a593Smuzhiyun 				     struct ethtool_link_ksettings *cmd)
2293*4882a593Smuzhiyun {
2294*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
2297*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2298*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	return 0;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun static int netdev_set_link_ksettings(struct net_device *dev,
2304*4882a593Smuzhiyun 				     const struct ethtool_link_ksettings *cmd)
2305*4882a593Smuzhiyun {
2306*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2307*4882a593Smuzhiyun 	int rc;
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
2310*4882a593Smuzhiyun 	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2311*4882a593Smuzhiyun 	rhine_set_carrier(&rp->mii_if);
2312*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	return rc;
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun static int netdev_nway_reset(struct net_device *dev)
2318*4882a593Smuzhiyun {
2319*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	return mii_nway_restart(&rp->mii_if);
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun static u32 netdev_get_link(struct net_device *dev)
2325*4882a593Smuzhiyun {
2326*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	return mii_link_ok(&rp->mii_if);
2329*4882a593Smuzhiyun }
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun static u32 netdev_get_msglevel(struct net_device *dev)
2332*4882a593Smuzhiyun {
2333*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 	return rp->msg_enable;
2336*4882a593Smuzhiyun }
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun static void netdev_set_msglevel(struct net_device *dev, u32 value)
2339*4882a593Smuzhiyun {
2340*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	rp->msg_enable = value;
2343*4882a593Smuzhiyun }
2344*4882a593Smuzhiyun 
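/*
 * ethtool get/set Wake-on-LAN handlers.  Only chips with the rqWOL quirk
 * support WOL; the selected options are cached in rp->wolopts and written to
 * the hardware at shutdown time (see rhine_shutdown_pci()).
 */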
2345*4882a593Smuzhiyun static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2346*4882a593Smuzhiyun {
2347*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 	if (!(rp->quirks & rqWOL))
2350*4882a593Smuzhiyun 		return;
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	spin_lock_irq(&rp->lock);
2353*4882a593Smuzhiyun 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2354*4882a593Smuzhiyun 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2355*4882a593Smuzhiyun 	wol->wolopts = rp->wolopts;
2356*4882a593Smuzhiyun 	spin_unlock_irq(&rp->lock);
2357*4882a593Smuzhiyun }
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2362*4882a593Smuzhiyun 	u32 support = WAKE_PHY | WAKE_MAGIC |
2363*4882a593Smuzhiyun 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	if (!(rp->quirks & rqWOL))
2366*4882a593Smuzhiyun 		return -EINVAL;
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	if (wol->wolopts & ~support)
2369*4882a593Smuzhiyun 		return -EINVAL;
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	spin_lock_irq(&rp->lock);
2372*4882a593Smuzhiyun 	rp->wolopts = wol->wolopts;
2373*4882a593Smuzhiyun 	spin_unlock_irq(&rp->lock);
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 	return 0;
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun static const struct ethtool_ops netdev_ethtool_ops = {
2379*4882a593Smuzhiyun 	.get_drvinfo		= netdev_get_drvinfo,
2380*4882a593Smuzhiyun 	.nway_reset		= netdev_nway_reset,
2381*4882a593Smuzhiyun 	.get_link		= netdev_get_link,
2382*4882a593Smuzhiyun 	.get_msglevel		= netdev_get_msglevel,
2383*4882a593Smuzhiyun 	.set_msglevel		= netdev_set_msglevel,
2384*4882a593Smuzhiyun 	.get_wol		= rhine_get_wol,
2385*4882a593Smuzhiyun 	.set_wol		= rhine_set_wol,
2386*4882a593Smuzhiyun 	.get_link_ksettings	= netdev_get_link_ksettings,
2387*4882a593Smuzhiyun 	.set_link_ksettings	= netdev_set_link_ksettings,
2388*4882a593Smuzhiyun };
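/*
 * A rough sketch of how the hooks above map onto the ethtool(8) command line
 * (the interface name "eth0" is only an example):
 *
 *   ethtool -i eth0                                   -> netdev_get_drvinfo
 *   ethtool -r eth0                                   -> netdev_nway_reset
 *   ethtool -s eth0 speed 100 duplex full autoneg off -> netdev_set_link_ksettings
 *   ethtool -s eth0 wol g                             -> rhine_set_wol (WAKE_MAGIC)
 */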
2389*4882a593Smuzhiyun 
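/* MII ioctl pass-through (SIOCGMIIPHY and friends); the device must be up. */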
2390*4882a593Smuzhiyun static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2391*4882a593Smuzhiyun {
2392*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2393*4882a593Smuzhiyun 	int rc;
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun 	if (!netif_running(dev))
2396*4882a593Smuzhiyun 		return -EINVAL;
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 	mutex_lock(&rp->task_lock);
2399*4882a593Smuzhiyun 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2400*4882a593Smuzhiyun 	rhine_set_carrier(&rp->mii_if);
2401*4882a593Smuzhiyun 	mutex_unlock(&rp->task_lock);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	return rc;
2404*4882a593Smuzhiyun }
2405*4882a593Smuzhiyun 
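/*
 * ndo_stop: quiesce NAPI and the Tx queue, put the MAC into loopback so it
 * stops touching the rings, disable interrupts, stop the Tx/Rx engines and
 * release the IRQ and descriptor rings.
 */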
2406*4882a593Smuzhiyun static int rhine_close(struct net_device *dev)
2407*4882a593Smuzhiyun {
2408*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2409*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	rhine_task_disable(rp);
2412*4882a593Smuzhiyun 	napi_disable(&rp->napi);
2413*4882a593Smuzhiyun 	netif_stop_queue(dev);
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2416*4882a593Smuzhiyun 		  ioread16(ioaddr + ChipCmd));
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	/* Switch to loopback mode to avoid hardware races. */
2419*4882a593Smuzhiyun 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	rhine_irq_disable(rp);
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun 	/* Stop the chip's Tx and Rx processes. */
2424*4882a593Smuzhiyun 	iowrite16(CmdStop, ioaddr + ChipCmd);
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	free_irq(rp->irq, dev);
2427*4882a593Smuzhiyun 	free_rbufs(dev);
2428*4882a593Smuzhiyun 	free_tbufs(dev);
2429*4882a593Smuzhiyun 	free_ring(dev);
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	return 0;
2432*4882a593Smuzhiyun }
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 
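/* Teardown paths for the PCI and platform (device tree) flavours of the NIC. */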
2435*4882a593Smuzhiyun static void rhine_remove_one_pci(struct pci_dev *pdev)
2436*4882a593Smuzhiyun {
2437*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
2438*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	unregister_netdev(dev);
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	pci_iounmap(pdev, rp->base);
2443*4882a593Smuzhiyun 	pci_release_regions(pdev);
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	free_netdev(dev);
2446*4882a593Smuzhiyun 	pci_disable_device(pdev);
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun static int rhine_remove_one_platform(struct platform_device *pdev)
2450*4882a593Smuzhiyun {
2451*4882a593Smuzhiyun 	struct net_device *dev = platform_get_drvdata(pdev);
2452*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	unregister_netdev(dev);
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun 	iounmap(rp->base);
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun 	free_netdev(dev);
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	return 0;
2461*4882a593Smuzhiyun }
2462*4882a593Smuzhiyun 
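/*
 * PCI shutdown hook: arm the Wake-on-LAN logic according to rp->wolopts and,
 * when the system is powering off, drop the chip into D3 unless avoid_D3 is
 * set.
 */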
2463*4882a593Smuzhiyun static void rhine_shutdown_pci(struct pci_dev *pdev)
2464*4882a593Smuzhiyun {
2465*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
2466*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2467*4882a593Smuzhiyun 	void __iomem *ioaddr = rp->base;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	if (!(rp->quirks & rqWOL))
2470*4882a593Smuzhiyun 		return; /* Nothing to do for non-WOL adapters */
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	rhine_power_init(dev);
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2475*4882a593Smuzhiyun 	if (rp->quirks & rq6patterns)
2476*4882a593Smuzhiyun 		iowrite8(0x04, ioaddr + WOLcgClr);
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	spin_lock(&rp->lock);
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 	if (rp->wolopts & WAKE_MAGIC) {
2481*4882a593Smuzhiyun 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2482*4882a593Smuzhiyun 		/*
2483*4882a593Smuzhiyun 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2484*4882a593Smuzhiyun 		 * not cooperate otherwise.
2485*4882a593Smuzhiyun 		 */
2486*4882a593Smuzhiyun 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2487*4882a593Smuzhiyun 	}
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2490*4882a593Smuzhiyun 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 	if (rp->wolopts & WAKE_PHY)
2493*4882a593Smuzhiyun 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	if (rp->wolopts & WAKE_UCAST)
2496*4882a593Smuzhiyun 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 	if (rp->wolopts) {
2499*4882a593Smuzhiyun 		/* Enable legacy WOL (for old motherboards) */
2500*4882a593Smuzhiyun 		iowrite8(0x01, ioaddr + PwcfgSet);
2501*4882a593Smuzhiyun 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2502*4882a593Smuzhiyun 	}
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun 	spin_unlock(&rp->lock);
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2507*4882a593Smuzhiyun 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 		pci_wake_from_d3(pdev, true);
2510*4882a593Smuzhiyun 		pci_set_power_state(pdev, PCI_D3hot);
2511*4882a593Smuzhiyun 	}
2512*4882a593Smuzhiyun }
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
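/*
 * System sleep support: suspend disables the deferred tasks, interrupts and
 * NAPI, detaches the device and (for PCI parts) arms WOL via the shutdown
 * path; resume re-allocates the Tx buffers, resets the Rx ring and
 * re-programs the registers before re-attaching the device.
 */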
2515*4882a593Smuzhiyun static int rhine_suspend(struct device *device)
2516*4882a593Smuzhiyun {
2517*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(device);
2518*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	if (!netif_running(dev))
2521*4882a593Smuzhiyun 		return 0;
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	rhine_task_disable(rp);
2524*4882a593Smuzhiyun 	rhine_irq_disable(rp);
2525*4882a593Smuzhiyun 	napi_disable(&rp->napi);
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	netif_device_detach(dev);
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 	if (dev_is_pci(device))
2530*4882a593Smuzhiyun 		rhine_shutdown_pci(to_pci_dev(device));
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	return 0;
2533*4882a593Smuzhiyun }
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun static int rhine_resume(struct device *device)
2536*4882a593Smuzhiyun {
2537*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(device);
2538*4882a593Smuzhiyun 	struct rhine_private *rp = netdev_priv(dev);
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	if (!netif_running(dev))
2541*4882a593Smuzhiyun 		return 0;
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	enable_mmio(rp->pioaddr, rp->quirks);
2544*4882a593Smuzhiyun 	rhine_power_init(dev);
2545*4882a593Smuzhiyun 	free_tbufs(dev);
2546*4882a593Smuzhiyun 	alloc_tbufs(dev);
2547*4882a593Smuzhiyun 	rhine_reset_rbufs(rp);
2548*4882a593Smuzhiyun 	rhine_task_enable(rp);
2549*4882a593Smuzhiyun 	spin_lock_bh(&rp->lock);
2550*4882a593Smuzhiyun 	init_registers(dev);
2551*4882a593Smuzhiyun 	spin_unlock_bh(&rp->lock);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	netif_device_attach(dev);
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun 	return 0;
2556*4882a593Smuzhiyun }
2557*4882a593Smuzhiyun 
2558*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2559*4882a593Smuzhiyun #define RHINE_PM_OPS	(&rhine_pm_ops)
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun #else
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun #define RHINE_PM_OPS	NULL
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun #endif /* !CONFIG_PM_SLEEP */
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun static struct pci_driver rhine_driver_pci = {
2568*4882a593Smuzhiyun 	.name		= DRV_NAME,
2569*4882a593Smuzhiyun 	.id_table	= rhine_pci_tbl,
2570*4882a593Smuzhiyun 	.probe		= rhine_init_one_pci,
2571*4882a593Smuzhiyun 	.remove		= rhine_remove_one_pci,
2572*4882a593Smuzhiyun 	.shutdown	= rhine_shutdown_pci,
2573*4882a593Smuzhiyun 	.driver.pm	= RHINE_PM_OPS,
2574*4882a593Smuzhiyun };
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun static struct platform_driver rhine_driver_platform = {
2577*4882a593Smuzhiyun 	.probe		= rhine_init_one_platform,
2578*4882a593Smuzhiyun 	.remove		= rhine_remove_one_platform,
2579*4882a593Smuzhiyun 	.driver = {
2580*4882a593Smuzhiyun 		.name	= DRV_NAME,
2581*4882a593Smuzhiyun 		.of_match_table	= rhine_of_tbl,
2582*4882a593Smuzhiyun 		.pm		= RHINE_PM_OPS,
2583*4882a593Smuzhiyun 	}
2584*4882a593Smuzhiyun };
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2587*4882a593Smuzhiyun 	{
2588*4882a593Smuzhiyun 		.ident = "EPIA-M",
2589*4882a593Smuzhiyun 		.matches = {
2590*4882a593Smuzhiyun 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2591*4882a593Smuzhiyun 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2592*4882a593Smuzhiyun 		},
2593*4882a593Smuzhiyun 	},
2594*4882a593Smuzhiyun 	{
2595*4882a593Smuzhiyun 		.ident = "KV7",
2596*4882a593Smuzhiyun 		.matches = {
2597*4882a593Smuzhiyun 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2598*4882a593Smuzhiyun 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2599*4882a593Smuzhiyun 		},
2600*4882a593Smuzhiyun 	},
2601*4882a593Smuzhiyun 	{ NULL }
2602*4882a593Smuzhiyun };
2603*4882a593Smuzhiyun 
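/*
 * Module init/exit: register both the PCI and the platform (device tree)
 * front ends.  A DMI match against boards whose BIOS cannot bring the chip
 * back out of D3 forces avoid_D3 before any device is probed.
 */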
2604*4882a593Smuzhiyun static int __init rhine_init(void)
2605*4882a593Smuzhiyun {
2606*4882a593Smuzhiyun 	int ret_pci, ret_platform;
2607*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	if (dmi_check_system(rhine_dmi_table)) {
2610*4882a593Smuzhiyun 		/* these BIOSes fail at PXE boot if chip is in D3 */
2611*4882a593Smuzhiyun 		avoid_D3 = true;
2612*4882a593Smuzhiyun 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2613*4882a593Smuzhiyun 	}
2614*4882a593Smuzhiyun 	else if (avoid_D3)
2615*4882a593Smuzhiyun 		pr_info("avoid_D3 set\n");
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun 	ret_pci = pci_register_driver(&rhine_driver_pci);
2618*4882a593Smuzhiyun 	ret_platform = platform_driver_register(&rhine_driver_platform);
2619*4882a593Smuzhiyun 	if ((ret_pci < 0) && (ret_platform < 0))
2620*4882a593Smuzhiyun 		return ret_pci;
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 	return 0;
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun static void __exit rhine_cleanup(void)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	platform_driver_unregister(&rhine_driver_platform);
2629*4882a593Smuzhiyun 	pci_unregister_driver(&rhine_driver_pci);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun module_init(rhine_init);
2634*4882a593Smuzhiyun module_exit(rhine_cleanup);
2635