xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/realtek/8139too.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun 	8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun 	Maintained by Jeff Garzik <jgarzik@pobox.com>
6*4882a593Smuzhiyun 	Copyright 2000-2002 Jeff Garzik
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun 	Much code comes from Donald Becker's rtl8139.c driver,
9*4882a593Smuzhiyun 	versions 1.13 and older.  This driver was originally based
10*4882a593Smuzhiyun 	on rtl8139.c version 1.07.  Header of rtl8139.c version 1.13:
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun 	-----<snip>-----
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun         	Written 1997-2001 by Donald Becker.
15*4882a593Smuzhiyun 		This software may be used and distributed according to the
16*4882a593Smuzhiyun 		terms of the GNU General Public License (GPL), incorporated
17*4882a593Smuzhiyun 		herein by reference.  Drivers based on or derived from this
18*4882a593Smuzhiyun 		code fall under the GPL and must retain the authorship,
19*4882a593Smuzhiyun 		copyright and license notice.  This file is not a complete
20*4882a593Smuzhiyun 		program and may only be used when the entire operating
21*4882a593Smuzhiyun 		system is licensed under the GPL.
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 		This driver is for boards based on the RTL8129 and RTL8139
24*4882a593Smuzhiyun 		PCI ethernet chips.
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun 		The author may be reached as becker@scyld.com, or C/O Scyld
27*4882a593Smuzhiyun 		Computing Corporation 410 Severn Ave., Suite 210 Annapolis
28*4882a593Smuzhiyun 		MD 21403
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 		Support and updates available at
31*4882a593Smuzhiyun 		http://www.scyld.com/network/rtl8139.html
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 		Twister-tuning table provided by Kinston
34*4882a593Smuzhiyun 		<shangh@realtek.com.tw>.
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	-----<snip>-----
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	This software may be used and distributed according to the terms
39*4882a593Smuzhiyun 	of the GNU General Public License, incorporated herein by reference.
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	Contributors:
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 		Donald Becker - he wrote the original driver, kudos to him!
44*4882a593Smuzhiyun 		(but please don't e-mail him for support, this isn't his driver)
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 		Tigran Aivazian - bug fixes, skbuff free cleanup
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 		Martin Mares - suggestions for PCI cleanup
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 		David S. Miller - PCI DMA and softnet updates
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 		Ernst Gill - fixes ported from BSD driver
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 		Daniel Kobras - identified specific locations of
55*4882a593Smuzhiyun 			posted MMIO write bugginess
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 		Gerard Sharp - bug fix, testing and feedback
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 		David Ford - Rx ring wrap fix
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 		Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
62*4882a593Smuzhiyun 		to find and fix a crucial bug on older chipsets.
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 		Donald Becker/Chris Butterworth/Marcus Westergren -
65*4882a593Smuzhiyun 		Noticed various Rx packet size-related buglets.
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 		Santiago Garcia Mantinan - testing and feedback
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 		Jens David - 2.2.x kernel backports
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 		Martin Dennett - incredibly helpful insight on undocumented
72*4882a593Smuzhiyun 		features of the 8139 chips
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 		Jean-Jacques Michel - bug fix
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 		Tobias Ringström - Rx interrupt status checking suggestion
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 		Andrew Morton - Clear blocked signals, avoid
79*4882a593Smuzhiyun 		buffer overrun setting current->comm.
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 		Kalle Olavi Niemitalo - Wake-on-LAN ioctls
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 		Robert Kuebel - Save kernel thread from dying on any signal.
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	Submitting bug reports:
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 		"rtl8139-diag -mmmaaavvveefN" output
88*4882a593Smuzhiyun 		enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun */
91*4882a593Smuzhiyun 
/* Prefix every pr_*() log line with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Driver identification strings. */
#define DRV_NAME	"8139too"
#define DRV_VERSION	"0.9.28"
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun #include <linux/module.h>
99*4882a593Smuzhiyun #include <linux/kernel.h>
100*4882a593Smuzhiyun #include <linux/compiler.h>
101*4882a593Smuzhiyun #include <linux/pci.h>
102*4882a593Smuzhiyun #include <linux/init.h>
103*4882a593Smuzhiyun #include <linux/interrupt.h>
104*4882a593Smuzhiyun #include <linux/netdevice.h>
105*4882a593Smuzhiyun #include <linux/etherdevice.h>
106*4882a593Smuzhiyun #include <linux/rtnetlink.h>
107*4882a593Smuzhiyun #include <linux/delay.h>
108*4882a593Smuzhiyun #include <linux/ethtool.h>
109*4882a593Smuzhiyun #include <linux/mii.h>
110*4882a593Smuzhiyun #include <linux/completion.h>
111*4882a593Smuzhiyun #include <linux/crc32.h>
112*4882a593Smuzhiyun #include <linux/io.h>
113*4882a593Smuzhiyun #include <linux/uaccess.h>
114*4882a593Smuzhiyun #include <linux/gfp.h>
115*4882a593Smuzhiyun #include <linux/if_vlan.h>
116*4882a593Smuzhiyun #include <asm/irq.h>
117*4882a593Smuzhiyun 
/* Banner string printed when the driver is loaded. */
#define RTL8139_DRIVER_NAME   DRV_NAME " Fast Ethernet driver " DRV_VERSION

/* Default Message level */
#define RTL8139_DEF_MSG_ENABLE   (NETIF_MSG_DRV   | \
                                 NETIF_MSG_PROBE  | \
                                 NETIF_MSG_LINK)


/* define to 1, 2 or 3 to enable copious debugging info */
#define RTL8139_DEBUG 0
128*4882a593Smuzhiyun 
/* define to 1 to disable lightweight runtime debugging checks */
#undef RTL8139_NDEBUG


#ifdef RTL8139_NDEBUG
#  define assert(expr) do {} while (0)
#else
/*
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement.  The previous bare "if (...) { }" form could capture a
 * following "else" (dangling-else) when used unbraced, e.g.:
 *     if (cond) assert(x); else ...
 */
#  define assert(expr) \
	do { \
		if (unlikely(!(expr))) \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       #expr, __FILE__, __func__, __LINE__); \
	} while (0)
#endif
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 
/* A few user-configurable values. */
/* media options */
#define MAX_UNITS 8
/* Per-unit forced media type and duplex; -1 means autodetect/autonegotiate. */
static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Whether to use MMIO or PIO. Default to MMIO. */
#ifdef CONFIG_8139TOO_PIO
static bool use_io = true;
#else
static bool use_io = false;
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* bitmapped message enable number (-1 selects RTL8139_DEF_MSG_ENABLE) */
static int debug = -1;
163*4882a593Smuzhiyun 
/*
 * Receive ring size
 * Warning: 64K ring has hardware issues and may lock up.
 */
#if defined(CONFIG_SH_DREAMCAST)
#define RX_BUF_IDX 0	/* 8K ring */
#else
#define RX_BUF_IDX	2	/* 32K ring */
#endif
#define RX_BUF_LEN	(8192 << RX_BUF_IDX)
#define RX_BUF_PAD	16
#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */

/* A 64K ring must wrap in hardware, so no extra padding is allocated. */
#if RX_BUF_LEN == 65536
#define RX_BUF_TOT_LEN	RX_BUF_LEN
#else
#define RX_BUF_TOT_LEN	(RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
#endif

/* Number of Tx descriptor registers. */
#define NUM_TX_DESC	4

/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/
#define MAX_ETH_FRAME_SIZE	1792

/* max supported payload size */
#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */
#define TX_BUF_SIZE	MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN	(TX_BUF_SIZE * NUM_TX_DESC)

/* PCI Tuning Parameters
   Threshold is bytes transferred to chip before transmission starts. */
#define TX_FIFO_THRESH 256	/* In bytes, rounded down to 32 byte units. */

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH	7	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST	7	/* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define TX_RETRY	8	/* 0-15.  retries = 16 + (TX_RETRY * 16) */

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (6*HZ)
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 
/* Per-board capability flags, stored in rtl8139_private::drv_flags. */
enum {
	HAS_MII_XCVR = 0x010000,	/* transceiver reached via external MII */
	HAS_CHIP_XCVR = 0x020000,	/* transceiver integrated in the chip */
	HAS_LNK_CHNG = 0x040000,	/* chip reports link-change events */
};

#define RTL_NUM_STATS 4		/* number of ETHTOOL_GSTATS u64's */
#define RTL_REGS_VER 1		/* version of reg. data in ETHTOOL_GREGS */
#define RTL_MIN_IO_SIZE 0x80
#define RTL8139B_IO_SIZE 256

#define RTL8129_CAPS	HAS_MII_XCVR
#define RTL8139_CAPS	(HAS_CHIP_XCVR|HAS_LNK_CHNG)

/* Board family, used as an index into board_info[] below and as the
   driver_data value in rtl8139_pci_tbl. */
typedef enum {
	RTL8139 = 0,
	RTL8129,
} board_t;


/* indexed by board_t, above */
static const struct {
	const char *name;	/* human-readable name for probe messages */
	u32 hw_flags;		/* capability flags (HAS_* above) */
} board_info[] = {
	{ "RealTek RTL8139", RTL8139_CAPS },
	{ "RealTek RTL8129", RTL8129_CAPS },
};
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 
/* PCI vendor/device IDs the driver binds to; the final field (driver_data)
   is a board_t index.  Many entries are RTL8139 clones. */
static const struct pci_device_id rtl8139_pci_tbl[] = {
	{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
	{0x16ec, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },

#ifdef CONFIG_SH_SECUREEDGE5410
	/* Bogus 8139 silicon reports 8129 without external PROM :-( */
	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
#endif
#ifdef CONFIG_8139TOO_8129
	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
#endif

	/* some crazy cards report invalid vendor ids like
	 * 0x0001 here.  The other ids are valid and constant,
	 * so we simply don't match on the main vendor id.
	 */
	{PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
	{PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
	{PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },

	{0,}	/* terminator */
};
MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);

/* Names for the ETHTOOL_GSTATS strings; order matches the fields of
   struct rtl_extra_stats below. */
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "early_rx" },
	{ "tx_buf_mapped" },
	{ "tx_timeouts" },
	{ "rx_lost_in_ring" },
};
291*4882a593Smuzhiyun 
/* The rest of these values should never change. */

/* Symbolic offsets to registers. */
enum RTL8139_registers {
	MAC0		= 0,	 /* Ethernet hardware address. */
	MAR0		= 8,	 /* Multicast filter. */
	TxStatus0	= 0x10,	 /* Transmit status (Four 32bit registers). */
	TxAddr0		= 0x20,	 /* Tx descriptors (also four 32bit). */
	RxBuf		= 0x30,	 /* Rx ring buffer DMA address. */
	ChipCmd		= 0x37,	 /* Command register (reset, Rx/Tx enable). */
	RxBufPtr	= 0x38,
	RxBufAddr	= 0x3A,
	IntrMask	= 0x3C,
	IntrStatus	= 0x3E,
	TxConfig	= 0x40,
	RxConfig	= 0x44,
	Timer		= 0x48,	 /* A general-purpose counter. */
	RxMissed	= 0x4C,  /* 24 bits valid, write clears. */
	Cfg9346		= 0x50,	 /* Config-write enable; see Cfg9346Bits. */
	Config0		= 0x51,
	Config1		= 0x52,
	TimerInt	= 0x54,
	MediaStatus	= 0x58,
	Config3		= 0x59,
	Config4		= 0x5A,	 /* absent on RTL-8139A */
	HltClk		= 0x5B,
	MultiIntr	= 0x5C,
	TxSummary	= 0x60,
	BasicModeCtrl	= 0x62,
	BasicModeStatus	= 0x64,
	NWayAdvert	= 0x66,
	NWayLPAR	= 0x68,
	NWayExpansion	= 0x6A,
	/* Undocumented registers, but required for proper operation. */
	FIFOTMS		= 0x70,	 /* FIFO Control and test. */
	CSCR		= 0x74,	 /* Chip Status and Configuration Register. */
	PARA78		= 0x78,
	FlashReg	= 0xD4,	/* Communication with Flash ROM, four bytes. */
	PARA7c		= 0x7c,	 /* Magic transceiver parameter register. */
	Config5		= 0xD8,	 /* absent on RTL-8139A */
};

/* NOTE(review): masks of bits cleared when writing the corresponding
   registers — exact usage is at the write sites, not visible here. */
enum ClearBitMasks {
	MultiIntrClear	= 0xF000,
	ChipCmdClear	= 0xE2,
	Config1Clear	= (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
};

/* Bits in the ChipCmd register. */
enum ChipCmdBits {
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,
};
346*4882a593Smuzhiyun 
/* Interrupt register bits, using my own meaningful names. */
enum IntrStatusBits {
	PCIErr		= 0x8000,
	PCSTimeout	= 0x4000,
	RxFIFOOver	= 0x40,
	RxUnderrun	= 0x20,
	RxOverflow	= 0x10,
	TxErr		= 0x08,
	TxOK		= 0x04,
	RxErr		= 0x02,
	RxOK		= 0x01,

	/* All the Rx conditions acknowledged together in the Rx path. */
	RxAckBits	= RxFIFOOver | RxOverflow | RxOK,
};

/* Bits in the per-descriptor TxStatus registers. */
enum TxStatusBits {
	TxHostOwns	= 0x2000,
	TxUnderrun	= 0x4000,
	TxStatOK	= 0x8000,
	TxOutOfWindow	= 0x20000000,
	TxAborted	= 0x40000000,
	TxCarrierLost	= 0x80000000,
};
/* Bits in the per-packet Rx status header. */
enum RxStatusBits {
	RxMulticast	= 0x8000,
	RxPhysical	= 0x4000,
	RxBroadcast	= 0x2000,
	RxBadSymbol	= 0x0020,
	RxRunt		= 0x0010,
	RxTooLong	= 0x0008,
	RxCRCErr	= 0x0004,
	RxBadAlign	= 0x0002,
	RxStatusOK	= 0x0001,
};

/* Bits in RxConfig. */
enum rx_mode_bits {
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
};

/* Bits in TxConfig. */
enum tx_config_bits {
        /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
        TxIFGShift	= 24,
        TxIFG84		= (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */
        TxIFG88		= (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */
        TxIFG92		= (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */
        TxIFG96		= (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */

	TxLoopBack	= (1 << 18) | (1 << 17), /* enable loopback test mode */
	TxCRC		= (1 << 16),	/* DISABLE Tx pkt CRC append */
	TxClearAbt	= (1 << 0),	/* Clear abort (WO) */
	TxDMAShift	= 8, /* DMA burst value (0-7) is shifted X many bits */
	TxRetryShift	= 4, /* TXRR value (0-15) is shifted X many bits */

	TxVersionMask	= 0x7C800000, /* mask out version bits 30-26, 23 */
};
409*4882a593Smuzhiyun 
/* Bits in Config1 */
enum Config1Bits {
	Cfg1_PM_Enable	= 0x01,
	Cfg1_VPD_Enable	= 0x02,
	Cfg1_PIO	= 0x04,
	Cfg1_MMIO	= 0x08,
	LWAKE		= 0x10,		/* not on 8139, 8139A */
	Cfg1_Driver_Load = 0x20,
	Cfg1_LED0	= 0x40,
	Cfg1_LED1	= 0x80,
	SLEEP		= (1 << 1),	/* only on 8139, 8139A */
	PWRDN		= (1 << 0),	/* only on 8139, 8139A */
};

/* Bits in Config3 */
enum Config3Bits {
	Cfg3_FBtBEn   	= (1 << 0), /* 1	= Fast Back to Back */
	Cfg3_FuncRegEn	= (1 << 1), /* 1	= enable CardBus Function registers */
	Cfg3_CLKRUN_En	= (1 << 2), /* 1	= enable CLKRUN */
	Cfg3_CardB_En 	= (1 << 3), /* 1	= enable CardBus registers */
	Cfg3_LinkUp   	= (1 << 4), /* 1	= wake up on link up */
	Cfg3_Magic    	= (1 << 5), /* 1	= wake up on Magic Packet (tm) */
	Cfg3_PARM_En  	= (1 << 6), /* 0	= software can set twister parameters */
	Cfg3_GNTSel   	= (1 << 7), /* 1	= delay 1 clock from PCI GNT signal */
};

/* Bits in Config4 */
enum Config4Bits {
	LWPTN	= (1 << 2),	/* not on 8139, 8139A */
};

/* Bits in Config5 */
enum Config5Bits {
	Cfg5_PME_STS   	= (1 << 0), /* 1	= PCI reset resets PME_Status */
	Cfg5_LANWake   	= (1 << 1), /* 1	= enable LANWake signal */
	Cfg5_LDPS      	= (1 << 2), /* 0	= save power when link is down */
	Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */
	Cfg5_UWF        = (1 << 4), /* 1 = accept unicast wakeup frame */
	Cfg5_MWF        = (1 << 5), /* 1 = accept multicast wakeup frame */
	Cfg5_BWF        = (1 << 6), /* 1 = accept broadcast wakeup frame */
};

/* Bits in RxConfig (FIFO threshold, DMA burst, ring size, wrap mode). */
enum RxConfigBits {
	/* rx fifo threshold */
	RxCfgFIFOShift	= 13,
	RxCfgFIFONone	= (7 << RxCfgFIFOShift),

	/* Max DMA burst */
	RxCfgDMAShift	= 8,
	RxCfgDMAUnlimited = (7 << RxCfgDMAShift),

	/* rx ring buffer length */
	RxCfgRcv8K	= 0,
	RxCfgRcv16K	= (1 << 11),
	RxCfgRcv32K	= (1 << 12),
	RxCfgRcv64K	= (1 << 11) | (1 << 12),

	/* Disable packet wrap at end of Rx buffer. (not possible with 64k) */
	RxNoWrap	= (1 << 7),
};

/* Twister tuning parameters from RealTek.
   Completely undocumented, but required to tune bad links on some boards. */
enum CSCRBits {
	CSCR_LinkOKBit		= 0x0400,
	CSCR_LinkChangeBit	= 0x0800,
	CSCR_LinkStatusBits	= 0x0f000,
	CSCR_LinkDownOffCmd	= 0x003c0,
	CSCR_LinkDownCmd	= 0x0f3c0,
};

/* Values written to Cfg9346 to lock/unlock the config registers. */
enum Cfg9346Bits {
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xC0,
};
485*4882a593Smuzhiyun 
/* Chip revision, used as a direct index into rtl_chip_info[] below. */
typedef enum {
	CH_8139	= 0,
	CH_8139_K,
	CH_8139A,
	CH_8139A_G,
	CH_8139B,
	CH_8130,
	CH_8139C,
	CH_8100,
	CH_8100B_8139D,
	CH_8101,
} chip_t;

/* Per-revision feature flags (see rtl_chip_info[]). */
enum chip_flags {
	HasHltClk	= (1 << 0),	/* revision has the HltClk register */
	HasLWake	= (1 << 1),	/* revision has LWAKE/LWPTN wake support */
};
503*4882a593Smuzhiyun 
/*
 * Assemble a chip hardware-version id from the TxConfig version bits
 * (bits 30-26, 23, 22).  Each argument is parenthesized so that passing
 * an expression with lower precedence than "<<" (e.g. "a | b") cannot
 * mis-expand; the original unparenthesized form would bind it to the
 * shift incorrectly.
 */
#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \
	((b30)<<30 | (b29)<<29 | (b28)<<28 | (b27)<<27 | (b26)<<26 | \
	 (b23)<<23 | (b22)<<22)
#define HW_REVID_MASK	HW_REVID(1, 1, 1, 1, 1, 1, 1)
507*4882a593Smuzhiyun 
/* directly indexed by chip_t, above */
static const struct {
	const char *name;	/* human-readable revision name */
	u32 version; /* from RTL8139C/RTL8139D docs */
	u32 flags;   /* chip_flags bits (HasHltClk / HasLWake) */
} rtl_chip_info[] = {
	{ "RTL-8139",
	  HW_REVID(1, 0, 0, 0, 0, 0, 0),
	  HasHltClk,
	},

	{ "RTL-8139 rev K",
	  HW_REVID(1, 1, 0, 0, 0, 0, 0),
	  HasHltClk,
	},

	{ "RTL-8139A",
	  HW_REVID(1, 1, 1, 0, 0, 0, 0),
	  HasHltClk, /* XXX undocumented? */
	},

	{ "RTL-8139A rev G",
	  HW_REVID(1, 1, 1, 0, 0, 1, 0),
	  HasHltClk, /* XXX undocumented? */
	},

	{ "RTL-8139B",
	  HW_REVID(1, 1, 1, 1, 0, 0, 0),
	  HasLWake,
	},

	{ "RTL-8130",
	  HW_REVID(1, 1, 1, 1, 1, 0, 0),
	  HasLWake,
	},

	{ "RTL-8139C",
	  HW_REVID(1, 1, 1, 0, 1, 0, 0),
	  HasLWake,
	},

	{ "RTL-8100",
	  HW_REVID(1, 1, 1, 1, 0, 1, 0),
	  HasLWake,
	},

	{ "RTL-8100B/8139D",
	  HW_REVID(1, 1, 1, 0, 1, 0, 1),
	  HasHltClk /* XXX undocumented? */
	| HasLWake,
	},

	{ "RTL-8101",
	  HW_REVID(1, 1, 1, 0, 1, 1, 1),
	  HasLWake,
	},
};
565*4882a593Smuzhiyun 
/* Driver-private counters exported through ethtool; field order must
   match ethtool_stats_keys[] above. */
struct rtl_extra_stats {
	unsigned long early_rx;
	unsigned long tx_buf_mapped;
	unsigned long tx_timeouts;
	unsigned long rx_lost_in_ring;
};

/* Packet/byte counters with a u64_stats_sync so 32-bit readers see
   consistent 64-bit values. */
struct rtl8139_stats {
	u64	packets;
	u64	bytes;
	struct u64_stats_sync	syncp;
};

/* Per-device driver state, stored in netdev_priv(dev). */
struct rtl8139_private {
	void __iomem		*mmio_addr;	/* mapped register base (MMIO or PIO) */
	int			drv_flags;	/* board capability flags (HAS_*) */
	struct pci_dev		*pci_dev;
	u32			msg_enable;	/* NETIF_MSG_* bitmap */
	struct napi_struct	napi;
	struct net_device	*dev;

	unsigned char		*rx_ring;	/* Rx ring buffer (RX_BUF_TOT_LEN bytes) */
	unsigned int		cur_rx;	/* RX buf index of next pkt */
	struct rtl8139_stats	rx_stats;
	dma_addr_t		rx_ring_dma;

	unsigned int		tx_flag;
	unsigned long		cur_tx;		/* next Tx descriptor to use */
	unsigned long		dirty_tx;	/* oldest un-reclaimed Tx descriptor */
	struct rtl8139_stats	tx_stats;
	unsigned char		*tx_buf[NUM_TX_DESC];	/* Tx bounce buffers */
	unsigned char		*tx_bufs;	/* Tx bounce buffer region. */
	dma_addr_t		tx_bufs_dma;

	signed char		phys[4];	/* MII device addresses. */

				/* Twister tune state. */
	char			twistie, twist_row, twist_col;

	unsigned int		watchdog_fired : 1;
	unsigned int		default_port : 4; /* Last dev->if_port value. */
	unsigned int		have_thread : 1;

	/* NOTE(review): exact lock coverage is defined by the code later in
	   the file; rx_lock appears dedicated to the Rx path. */
	spinlock_t		lock;
	spinlock_t		rx_lock;

	chip_t			chipset;	/* detected chip revision */
	u32			rx_config;	/* cached RxConfig value */
	struct rtl_extra_stats	xstats;

	struct delayed_work	thread;

	struct mii_if_info	mii;
	unsigned int		regs_len;
	unsigned long		fifo_copy_timeout;
};
622*4882a593Smuzhiyun 
/* Module metadata and load-time parameters. */
MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(use_io, bool, 0);
MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
module_param(multicast_filter_limit, int, 0);
module_param_array(media, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139too bitmapped message enable number");
MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
638*4882a593Smuzhiyun 
/* Forward declarations: the net_device_ops / ethtool_ops tables below
 * reference these handlers before their definitions appear. */
static int read_eeprom (void __iomem *ioaddr, int location, int addr_len);
static int rtl8139_open (struct net_device *dev);
static int mdio_read (struct net_device *dev, int phy_id, int location);
static void mdio_write (struct net_device *dev, int phy_id, int location,
			int val);
static void rtl8139_start_thread(struct rtl8139_private *tp);
static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void rtl8139_init_ring (struct net_device *dev);
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
				       struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void rtl8139_poll_controller(struct net_device *dev);
#endif
static int rtl8139_set_mac_address(struct net_device *dev, void *p);
static int rtl8139_poll(struct napi_struct *napi, int budget);
static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
static int rtl8139_close (struct net_device *dev);
static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static void rtl8139_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats);
static void rtl8139_set_rx_mode (struct net_device *dev);
static void __set_rx_mode (struct net_device *dev);
static void rtl8139_hw_start (struct net_device *dev);
static void rtl8139_thread (struct work_struct *work);
static void rtl8139_tx_timeout_task(struct work_struct *work);
static const struct ethtool_ops rtl8139_ethtool_ops;
665*4882a593Smuzhiyun 
/* write MMIO register, with flush */
/* Flush avoids rtl8139 bug w/ posted MMIO writes: the read-back forces
 * the posted write to complete before continuing. */
#define RTL_W8_F(reg, val8)	do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0)
#define RTL_W16_F(reg, val16)	do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0)
#define RTL_W32_F(reg, val32)	do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0)

/* write MMIO register (no read-back flush).  NOTE: all of these macros
 * expect a local 'ioaddr' to be in scope at the call site. */
#define RTL_W8(reg, val8)	iowrite8 ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	iowrite16 ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	iowrite32 ((val32), ioaddr + (reg))

/* read MMIO register */
#define RTL_R8(reg)		ioread8 (ioaddr + (reg))
#define RTL_R16(reg)		ioread16 (ioaddr + (reg))
#define RTL_R32(reg)		ioread32 (ioaddr + (reg))
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 
/* Full interrupt mask: every source the ISR handles. */
static const u16 rtl8139_intr_mask =
	PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
	TxErr | TxOK | RxErr | RxOK;

/* Same mask with the receive sources (RxOverflow, RxFIFOOver, RxOK)
 * removed. */
static const u16 rtl8139_norx_intr_mask =
	PCIErr | PCSTimeout | RxUnderrun |
	TxErr | TxOK | RxErr ;

/* RX ring size is a compile-time choice (RX_BUF_IDX selects 8K/16K/32K/64K).
 * Only the 64K ring omits RxNoWrap. */
#if RX_BUF_IDX == 0
static const unsigned int rtl8139_rx_config =
	RxCfgRcv8K | RxNoWrap |
	(RX_FIFO_THRESH << RxCfgFIFOShift) |
	(RX_DMA_BURST << RxCfgDMAShift);
#elif RX_BUF_IDX == 1
static const unsigned int rtl8139_rx_config =
	RxCfgRcv16K | RxNoWrap |
	(RX_FIFO_THRESH << RxCfgFIFOShift) |
	(RX_DMA_BURST << RxCfgDMAShift);
#elif RX_BUF_IDX == 2
static const unsigned int rtl8139_rx_config =
	RxCfgRcv32K | RxNoWrap |
	(RX_FIFO_THRESH << RxCfgFIFOShift) |
	(RX_DMA_BURST << RxCfgDMAShift);
#elif RX_BUF_IDX == 3
static const unsigned int rtl8139_rx_config =
	RxCfgRcv64K |
	(RX_FIFO_THRESH << RxCfgFIFOShift) |
	(RX_DMA_BURST << RxCfgDMAShift);
#else
#error "Invalid configuration for 8139_RXBUF_IDX"
#endif

/* TX configuration: 96-bit interframe gap plus DMA burst and retry counts. */
static const unsigned int rtl8139_tx_config =
	TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
717*4882a593Smuzhiyun 
__rtl8139_cleanup_dev(struct net_device * dev)718*4882a593Smuzhiyun static void __rtl8139_cleanup_dev (struct net_device *dev)
719*4882a593Smuzhiyun {
720*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
721*4882a593Smuzhiyun 	struct pci_dev *pdev;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	assert (dev != NULL);
724*4882a593Smuzhiyun 	assert (tp->pci_dev != NULL);
725*4882a593Smuzhiyun 	pdev = tp->pci_dev;
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	if (tp->mmio_addr)
728*4882a593Smuzhiyun 		pci_iounmap (pdev, tp->mmio_addr);
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/* it's ok to call this even if we have no regions to free */
731*4882a593Smuzhiyun 	pci_release_regions (pdev);
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	free_netdev(dev);
734*4882a593Smuzhiyun }
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 
/* Soft-reset the chip and busy-wait (up to 1000 * 10us) for the
 * CmdReset bit to self-clear.  Times out silently if it never does. */
static void rtl8139_chip_reset (void __iomem *ioaddr)
{
	int tries;

	/* Request a software reset. */
	RTL_W8 (ChipCmd, CmdReset);

	/* Poll until the hardware clears CmdReset. */
	for (tries = 0; tries < 1000; tries++) {
		barrier();
		if (!(RTL_R8 (ChipCmd) & CmdReset))
			return;
		udelay (10);
	}
}
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 
rtl8139_init_board(struct pci_dev * pdev)754*4882a593Smuzhiyun static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun 	struct device *d = &pdev->dev;
757*4882a593Smuzhiyun 	void __iomem *ioaddr;
758*4882a593Smuzhiyun 	struct net_device *dev;
759*4882a593Smuzhiyun 	struct rtl8139_private *tp;
760*4882a593Smuzhiyun 	u8 tmp8;
761*4882a593Smuzhiyun 	int rc, disable_dev_on_err = 0;
762*4882a593Smuzhiyun 	unsigned int i, bar;
763*4882a593Smuzhiyun 	unsigned long io_len;
764*4882a593Smuzhiyun 	u32 version;
765*4882a593Smuzhiyun 	static const struct {
766*4882a593Smuzhiyun 		unsigned long mask;
767*4882a593Smuzhiyun 		char *type;
768*4882a593Smuzhiyun 	} res[] = {
769*4882a593Smuzhiyun 		{ IORESOURCE_IO,  "PIO" },
770*4882a593Smuzhiyun 		{ IORESOURCE_MEM, "MMIO" }
771*4882a593Smuzhiyun 	};
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	assert (pdev != NULL);
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	/* dev and priv zeroed in alloc_etherdev */
776*4882a593Smuzhiyun 	dev = alloc_etherdev (sizeof (*tp));
777*4882a593Smuzhiyun 	if (dev == NULL)
778*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	tp = netdev_priv(dev);
783*4882a593Smuzhiyun 	tp->pci_dev = pdev;
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
786*4882a593Smuzhiyun 	rc = pci_enable_device (pdev);
787*4882a593Smuzhiyun 	if (rc)
788*4882a593Smuzhiyun 		goto err_out;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	disable_dev_on_err = 1;
791*4882a593Smuzhiyun 	rc = pci_request_regions (pdev, DRV_NAME);
792*4882a593Smuzhiyun 	if (rc)
793*4882a593Smuzhiyun 		goto err_out;
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	pci_set_master (pdev);
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	u64_stats_init(&tp->rx_stats.syncp);
798*4882a593Smuzhiyun 	u64_stats_init(&tp->tx_stats.syncp);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun retry:
801*4882a593Smuzhiyun 	/* PIO bar register comes first. */
802*4882a593Smuzhiyun 	bar = !use_io;
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	io_len = pci_resource_len(pdev, bar);
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
809*4882a593Smuzhiyun 		dev_err(d, "region #%d not a %s resource, aborting\n", bar,
810*4882a593Smuzhiyun 			res[bar].type);
811*4882a593Smuzhiyun 		rc = -ENODEV;
812*4882a593Smuzhiyun 		goto err_out;
813*4882a593Smuzhiyun 	}
814*4882a593Smuzhiyun 	if (io_len < RTL_MIN_IO_SIZE) {
815*4882a593Smuzhiyun 		dev_err(d, "Invalid PCI %s region size(s), aborting\n",
816*4882a593Smuzhiyun 			res[bar].type);
817*4882a593Smuzhiyun 		rc = -ENODEV;
818*4882a593Smuzhiyun 		goto err_out;
819*4882a593Smuzhiyun 	}
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	ioaddr = pci_iomap(pdev, bar, 0);
822*4882a593Smuzhiyun 	if (!ioaddr) {
823*4882a593Smuzhiyun 		dev_err(d, "cannot map %s\n", res[bar].type);
824*4882a593Smuzhiyun 		if (!use_io) {
825*4882a593Smuzhiyun 			use_io = true;
826*4882a593Smuzhiyun 			goto retry;
827*4882a593Smuzhiyun 		}
828*4882a593Smuzhiyun 		rc = -ENODEV;
829*4882a593Smuzhiyun 		goto err_out;
830*4882a593Smuzhiyun 	}
831*4882a593Smuzhiyun 	tp->regs_len = io_len;
832*4882a593Smuzhiyun 	tp->mmio_addr = ioaddr;
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 	/* Bring old chips out of low-power mode. */
835*4882a593Smuzhiyun 	RTL_W8 (HltClk, 'R');
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	/* check for missing/broken hardware */
838*4882a593Smuzhiyun 	if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
839*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
840*4882a593Smuzhiyun 		rc = -EIO;
841*4882a593Smuzhiyun 		goto err_out;
842*4882a593Smuzhiyun 	}
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	/* identify chip attached to board */
845*4882a593Smuzhiyun 	version = RTL_R32 (TxConfig) & HW_REVID_MASK;
846*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
847*4882a593Smuzhiyun 		if (version == rtl_chip_info[i].version) {
848*4882a593Smuzhiyun 			tp->chipset = i;
849*4882a593Smuzhiyun 			goto match;
850*4882a593Smuzhiyun 		}
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
853*4882a593Smuzhiyun 	i = 0;
854*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
855*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
856*4882a593Smuzhiyun 	tp->chipset = 0;
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun match:
859*4882a593Smuzhiyun 	pr_debug("chipset id (%d) == index %d, '%s'\n",
860*4882a593Smuzhiyun 		 version, i, rtl_chip_info[i].name);
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 	if (tp->chipset >= CH_8139B) {
863*4882a593Smuzhiyun 		u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
864*4882a593Smuzhiyun 		pr_debug("PCI PM wakeup\n");
865*4882a593Smuzhiyun 		if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
866*4882a593Smuzhiyun 		    (tmp8 & LWAKE))
867*4882a593Smuzhiyun 			new_tmp8 &= ~LWAKE;
868*4882a593Smuzhiyun 		new_tmp8 |= Cfg1_PM_Enable;
869*4882a593Smuzhiyun 		if (new_tmp8 != tmp8) {
870*4882a593Smuzhiyun 			RTL_W8 (Cfg9346, Cfg9346_Unlock);
871*4882a593Smuzhiyun 			RTL_W8 (Config1, tmp8);
872*4882a593Smuzhiyun 			RTL_W8 (Cfg9346, Cfg9346_Lock);
873*4882a593Smuzhiyun 		}
874*4882a593Smuzhiyun 		if (rtl_chip_info[tp->chipset].flags & HasLWake) {
875*4882a593Smuzhiyun 			tmp8 = RTL_R8 (Config4);
876*4882a593Smuzhiyun 			if (tmp8 & LWPTN) {
877*4882a593Smuzhiyun 				RTL_W8 (Cfg9346, Cfg9346_Unlock);
878*4882a593Smuzhiyun 				RTL_W8 (Config4, tmp8 & ~LWPTN);
879*4882a593Smuzhiyun 				RTL_W8 (Cfg9346, Cfg9346_Lock);
880*4882a593Smuzhiyun 			}
881*4882a593Smuzhiyun 		}
882*4882a593Smuzhiyun 	} else {
883*4882a593Smuzhiyun 		pr_debug("Old chip wakeup\n");
884*4882a593Smuzhiyun 		tmp8 = RTL_R8 (Config1);
885*4882a593Smuzhiyun 		tmp8 &= ~(SLEEP | PWRDN);
886*4882a593Smuzhiyun 		RTL_W8 (Config1, tmp8);
887*4882a593Smuzhiyun 	}
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 	rtl8139_chip_reset (ioaddr);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	return dev;
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun err_out:
894*4882a593Smuzhiyun 	__rtl8139_cleanup_dev (dev);
895*4882a593Smuzhiyun 	if (disable_dev_on_err)
896*4882a593Smuzhiyun 		pci_disable_device (pdev);
897*4882a593Smuzhiyun 	return ERR_PTR(rc);
898*4882a593Smuzhiyun }
899*4882a593Smuzhiyun 
/* ndo_set_features handler.  Of the togglable features only
 * NETIF_F_RXALL needs hardware programming here: it maps onto the
 * AcceptErr/AcceptRunt bits of the RX configuration register. */
static int rtl8139_set_features(struct net_device *dev, netdev_features_t features)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	netdev_features_t toggled = features ^ dev->features;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	int mode;

	if (!(toggled & NETIF_F_RXALL))
		return 0;

	spin_lock_irqsave(&tp->lock, flags);

	mode = tp->rx_config;
	if (features & NETIF_F_RXALL)
		mode |= AcceptErr | AcceptRunt;
	else
		mode &= ~(AcceptErr | AcceptRunt);
	tp->rx_config = rtl8139_rx_config | mode;
	RTL_W32_F(RxConfig, tp->rx_config);

	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
926*4882a593Smuzhiyun 
/* net_device callbacks installed on every 8139 netdev in
 * rtl8139_init_one(). */
static const struct net_device_ops rtl8139_netdev_ops = {
	.ndo_open		= rtl8139_open,
	.ndo_stop		= rtl8139_close,
	.ndo_get_stats64	= rtl8139_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= rtl8139_set_mac_address,
	.ndo_start_xmit		= rtl8139_start_xmit,
	.ndo_set_rx_mode	= rtl8139_set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= rtl8139_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8139_poll_controller,
#endif
	.ndo_set_features	= rtl8139_set_features,
};
942*4882a593Smuzhiyun 
/* PCI probe routine: reject 8139C+ chips (handled by 8139cp), apply the
 * OQO Model 2 PIO quirk, initialize the board via rtl8139_init_board(),
 * read the MAC address from the EEPROM, wire up netdev ops and
 * features, register the netdev, detect MII transceivers (8129 builds
 * only), apply the media/full_duplex module options, and finally put
 * the chip back into low-power mode until open().
 * Returns 0 or a negative errno.
 */
static int rtl8139_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct rtl8139_private *tp;
	int i, addr_len, option;
	void __iomem *ioaddr;
	static int board_idx = -1;	/* counts boards across probe calls */

	assert (pdev != NULL);
	assert (ent != NULL);

	board_idx++;

	/* when we're built into the kernel, the driver version message
	 * is only printed if at least one 8139 board has been found
	 */
#ifndef MODULE
	{
		static int printed_version;
		if (!printed_version++)
			pr_info(RTL8139_DRIVER_NAME "\n");
	}
#endif

	/* revision >= 0x20 identifies an enhanced 8139C+, which belongs
	 * to the separate 8139cp driver — refuse it here */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
		dev_info(&pdev->dev,
			   "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
		       	   pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

	/* quirk: the OQO Model 2, matched via its subsystem IDs, is
	 * forced to port I/O access */
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
	    pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
	    pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
		pr_info("OQO Model 2 detected. Forcing PIO\n");
		use_io = true;
	}

	dev = rtl8139_init_board (pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	assert (dev != NULL);
	tp = netdev_priv(dev);
	tp->dev = dev;

	ioaddr = tp->mmio_addr;
	assert (ioaddr != NULL);

	/* an EEPROM answering 0x8129 at word 0 uses 8 address bits,
	 * otherwise 6; the MAC address lives in words 7..9 */
	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((__le16 *) (dev->dev_addr))[i] =
		    cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));

	/* The Rtl8139-specific entries in the device structure. */
	dev->netdev_ops = &rtl8139_netdev_ops;
	dev->ethtool_ops = &rtl8139_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);

	/* note: the hardware is not capable of sg/csum/highdma, however
	 * through the use of skb_copy_and_csum_dev we enable these
	 * features
	 */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
	dev->vlan_features = dev->features;

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	/* MTU range: 68 - 1770 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = MAX_ETH_DATA_SIZE;

	/* tp zeroed and aligned in alloc_etherdev */
	tp = netdev_priv(dev);

	/* note: tp->chipset set in rtl8139_init_board */
	tp->drv_flags = board_info[ent->driver_data].hw_flags;
	tp->mmio_addr = ioaddr;
	/* debug < 0 means "use the driver default message mask" */
	tp->msg_enable =
		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
	spin_lock_init (&tp->lock);
	spin_lock_init (&tp->rx_lock);
	INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
	tp->mii.dev = dev;
	tp->mii.mdio_read = mdio_read;
	tp->mii.mdio_write = mdio_write;
	tp->mii.phy_id_mask = 0x3f;
	tp->mii.reg_num_mask = 0x1f;

	/* dev is fully set up and ready to use now */
	pr_debug("about to register device named %s (%p)...\n",
		 dev->name, dev);
	i = register_netdev (dev);
	if (i) goto err_out;

	pci_set_drvdata (pdev, dev);

	netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
		    board_info[ent->driver_data].name,
		    ioaddr, dev->dev_addr, pdev->irq);

	netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
		   rtl_chip_info[tp->chipset].name);

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes too much time. */
#ifdef CONFIG_8139TOO_8129
	if (tp->drv_flags & HAS_MII_XCVR) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				u16 advertising = mdio_read(dev, phy, 4);
				tp->phys[phy_idx++] = phy;
				netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
					    phy, mii_status, advertising);
			}
		}
		if (phy_idx == 0) {
			netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
			tp->phys[0] = 32;
		}
	} else
#endif
		/* phy id 32 means the 8139's internal transceiver */
		tp->phys[0] = 32;
	tp->mii.phy_id = tp->phys[0];

	/* The lower four bits are the media type. */
	option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
	if (option > 0) {
		tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
		tp->default_port = option & 0xFF;
		if (tp->default_port)
			tp->mii.force_media = 1;
	}
	if (board_idx < MAX_UNITS  &&  full_duplex[board_idx] > 0)
		tp->mii.full_duplex = full_duplex[board_idx];
	if (tp->mii.full_duplex) {
		netdev_info(dev, "Media type forced to Full Duplex\n");
		/* Changing the MII-advertised media because might prevent
		   re-connection. */
		tp->mii.force_media = 1;
	}
	if (tp->default_port) {
		netdev_info(dev, "  Forcing %dMbps %s-duplex operation\n",
			    (option & 0x20 ? 100 : 10),
			    (option & 0x10 ? "full" : "half"));
		mdio_write(dev, tp->phys[0], 0,
				   ((option & 0x20) ? 0x2000 : 0) | 	/* 100Mbps? */
				   ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
	}

	/* Put the chip into low-power mode. */
	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'H');	/* 'R' would leave the clock running. */

	return 0;

err_out:
	/* register_netdev failed; i holds its error code */
	__rtl8139_cleanup_dev (dev);
	pci_disable_device (pdev);
	return i;
}
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 
rtl8139_remove_one(struct pci_dev * pdev)1114*4882a593Smuzhiyun static void rtl8139_remove_one(struct pci_dev *pdev)
1115*4882a593Smuzhiyun {
1116*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata (pdev);
1117*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	assert (dev != NULL);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	cancel_delayed_work_sync(&tp->thread);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	unregister_netdev (dev);
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	__rtl8139_cleanup_dev (dev);
1126*4882a593Smuzhiyun 	pci_disable_device (pdev);
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits (bit-banged through the Cfg9346 register). */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0		0x00
#define EE_WRITE_1		0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB			(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
 */

/* The register read-back forces the preceding posted write to complete;
   expects a local 'ioaddr' in scope at the call site. */
#define eeprom_delay()	(void)RTL_R8(Cfg9346)

/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD		(6)
#define EE_ERASE_CMD	(7)
1151*4882a593Smuzhiyun 
/* Bit-bang one 16-bit word out of the serial EEPROM through the
 * Cfg9346 register.  'location' is the word address, 'addr_len' the
 * number of address bits (6 or 8 depending on the part). */
static int read_eeprom(void __iomem *ioaddr, int location, int addr_len)
{
	unsigned value = 0;
	int cmd = location | (EE_READ_CMD << addr_len);
	int bit;

	/* Raise chip select. */
	RTL_W8 (Cfg9346, EE_ENB & ~EE_CS);
	RTL_W8 (Cfg9346, EE_ENB);
	eeprom_delay ();

	/* Clock the read command out, most significant bit first. */
	for (bit = 4 + addr_len; bit >= 0; bit--) {
		int dataval = (cmd & (1 << bit)) ? EE_DATA_WRITE : 0;

		RTL_W8 (Cfg9346, EE_ENB | dataval);
		eeprom_delay ();
		RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay ();
	}
	RTL_W8 (Cfg9346, EE_ENB);
	eeprom_delay ();

	/* Clock the 16 data bits in, most significant bit first. */
	for (bit = 0; bit < 16; bit++) {
		RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay ();
		value <<= 1;
		if (RTL_R8 (Cfg9346) & EE_DATA_READ)
			value |= 1;
		RTL_W8 (Cfg9346, EE_ENB);
		eeprom_delay ();
	}

	/* Terminate the EEPROM access. */
	RTL_W8(Cfg9346, 0);
	eeprom_delay ();

	return value;
}
1189*4882a593Smuzhiyun 
/* MII serial management: mostly bogus for now. */
/* Read and write the MII management registers using software-generated
   serial MDIO protocol.
   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues. */
/* Bit positions in the Config4 register used for bit-banging MDIO. */
#define MDIO_DIR		0x80
#define MDIO_DATA_OUT	0x04
#define MDIO_DATA_IN	0x02
#define MDIO_CLK		0x01
#define MDIO_WRITE0 (MDIO_DIR)
#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)

/* Read-back of Config4 flushes the posted write and provides the bus
   timing delay; expects a local 'ioaddr' in scope at the call site. */
#define mdio_delay()	RTL_R8(Config4)


/* Maps MII register numbers 0-7 to the equivalent internal 8139
   registers; 0 means that MII register has no 8139 counterpart. */
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 
#ifdef CONFIG_8139TOO_8129
/* Syncronize the MII management interface by shifting 32 one bits out. */
static void mdio_sync (void __iomem *ioaddr)
{
	int n;

	/* 33 clock cycles (0..32 inclusive) with the data line held high */
	for (n = 0; n <= 32; n++) {
		RTL_W8 (Config4, MDIO_WRITE1);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK);
		mdio_delay ();
	}
}
#endif
1232*4882a593Smuzhiyun 
/* Read an MII management register.  phy_id > 31 denotes the 8139's
 * built-in transceiver, accessed through mapped internal registers
 * (mii_2_8139_map) instead of real MDIO.  Bit-banged MDIO for external
 * PHYs is only compiled in for the 8129 (CONFIG_8139TOO_8129); without
 * it, external-PHY reads return 0.
 */
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	int retval = 0;
#ifdef CONFIG_8139TOO_8129
	void __iomem *ioaddr = tp->mmio_addr;
	/* read frame: header bits 0xf6, then PHY and register address */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i;
#endif

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		void __iomem *ioaddr = tp->mmio_addr;
		/* unmapped MII registers read as 0 */
		return location < 8 && mii_2_8139_map[location] ?
		    RTL_R16 (mii_2_8139_map[location]) : 0;
	}

#ifdef CONFIG_8139TOO_8129
	mdio_sync (ioaddr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;

		RTL_W8 (Config4, MDIO_DIR | dataval);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK);
		mdio_delay ();
	}

	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		RTL_W8 (Config4, 0);
		mdio_delay ();
		retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0);
		RTL_W8 (Config4, MDIO_CLK);
		mdio_delay ();
	}
#endif

	/* drop the trailing idle bit and keep the 16 data bits */
	return (retval >> 1) & 0xffff;
}
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 
/* Write an MII management register.  For the 8139's built-in
 * transceiver (phy_id > 31) the value goes to the mapped internal
 * register; register 0 (BasicModeCtrl) additionally needs the config
 * registers unlocked around the write.  Bit-banged MDIO for external
 * PHYs is only compiled in for the 8129 (CONFIG_8139TOO_8129);
 * without it, external-PHY writes are silently dropped.
 */
static void mdio_write (struct net_device *dev, int phy_id, int location,
			int value)
{
	struct rtl8139_private *tp = netdev_priv(dev);
#ifdef CONFIG_8139TOO_8129
	void __iomem *ioaddr = tp->mmio_addr;
	/* write frame: header 0x5002, PHY address, register address, data */
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int i;
#endif

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		void __iomem *ioaddr = tp->mmio_addr;
		if (location == 0) {
			RTL_W8 (Cfg9346, Cfg9346_Unlock);
			RTL_W16 (BasicModeCtrl, value);
			RTL_W8 (Cfg9346, Cfg9346_Lock);
		} else if (location < 8 && mii_2_8139_map[location])
			RTL_W16 (mii_2_8139_map[location], value);
		return;
	}

#ifdef CONFIG_8139TOO_8129
	mdio_sync (ioaddr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval =
		    (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
		RTL_W8 (Config4, dataval);
		mdio_delay ();
		RTL_W8 (Config4, dataval | MDIO_CLK);
		mdio_delay ();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		RTL_W8 (Config4, 0);
		mdio_delay ();
		RTL_W8 (Config4, MDIO_CLK);
		mdio_delay ();
	}
#endif
}
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 
/* net_device ndo_open callback: acquire the interrupt line and the two
 * coherent DMA areas (Tx slots, Rx ring), then initialize the rings,
 * start the hardware, and kick off the periodic worker thread.
 *
 * Returns 0 on success, the request_irq() error code, or -ENOMEM if
 * either DMA allocation fails (all partial resources are released).
 */
static int rtl8139_open (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	const int irq = tp->pci_dev->irq;
	int retval;

	/* IRQF_SHARED: the PCI interrupt line may be shared. */
	retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	/* One contiguous coherent buffer holding all Tx slots, and one for
	 * the single wrap-around Rx ring. */
	tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
					   &tp->tx_bufs_dma, GFP_KERNEL);
	tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
					   &tp->rx_ring_dma, GFP_KERNEL);
	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
		/* Unwind: free the IRQ and whichever allocation succeeded. */
		free_irq(irq, dev);

		if (tp->tx_bufs)
			dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
					    tp->tx_bufs, tp->tx_bufs_dma);
		if (tp->rx_ring)
			dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
					    tp->rx_ring, tp->rx_ring_dma);

		return -ENOMEM;

	}

	napi_enable(&tp->napi);

	tp->mii.full_duplex = tp->mii.force_media;
	/* Fold the Tx FIFO threshold into the per-packet TxStatus flags. */
	tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;

	rtl8139_init_ring (dev);
	rtl8139_hw_start (dev);
	netif_start_queue (dev);

	netif_dbg(tp, ifup, dev,
		  "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
		  __func__,
		  (unsigned long long)pci_resource_start (tp->pci_dev, 1),
		  irq, RTL_R8 (MediaStatus),
		  tp->mii.full_duplex ? "full" : "half");

	/* Start the delayed-work thread (link check / twister tuning). */
	rtl8139_start_thread(tp);

	return 0;
}
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 
rtl_check_media(struct net_device * dev,unsigned int init_media)1370*4882a593Smuzhiyun static void rtl_check_media (struct net_device *dev, unsigned int init_media)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	if (tp->phys[0] >= 0) {
1375*4882a593Smuzhiyun 		mii_check_media(&tp->mii, netif_msg_link(tp), init_media);
1376*4882a593Smuzhiyun 	}
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun /* Start the hardware at open or resume. */
/* Start the hardware at open or resume.
 *
 * The register write ordering below matters: Tx/Rx must be enabled
 * before the transfer thresholds are programmed, and Config/BMCR writes
 * only take effect between the Cfg9346 unlock/lock pair.
 */
static void rtl8139_hw_start (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 i;
	u8 tmp;

	/* Bring old chips out of low-power mode. */
	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'R');	/* 'R' presumably = "run" -- confirm against HltClk handling elsewhere */

	rtl8139_chip_reset (ioaddr);

	/* unlock Config[01234] and BMCR register writes */
	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
	/* Restore our idea of the MAC address: low 4 bytes into MAC0,
	 * remaining 2 bytes written through MAC0+4 as a 32-bit access. */
	RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));

	tp->cur_rx = 0;

	/* init Rx ring buffer DMA address */
	RTL_W32_F (RxBuf, tp->rx_ring_dma);

	/* Must enable Tx/Rx before setting transfer thresholds! */
	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
	RTL_W32 (RxConfig, tp->rx_config);
	RTL_W32 (TxConfig, rtl8139_tx_config);

	rtl_check_media (dev, 1);

	if (tp->chipset >= CH_8139B) {
		/* Disable magic packet scanning, which is enabled
		 * when PM is enabled in Config1.  It can be reenabled
		 * via ETHTOOL_SWOL if desired.  */
		RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
	}

	netdev_dbg(dev, "init buffer addresses\n");

	/* Lock Config[01234] and BMCR register writes */
	RTL_W8 (Cfg9346, Cfg9346_Lock);

	/* init Tx buffer DMA addresses: each slot's bus address is the
	 * base DMA address plus its offset inside the shared buffer. */
	for (i = 0; i < NUM_TX_DESC; i++)
		RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));

	RTL_W32 (RxMissed, 0);

	rtl8139_set_rx_mode (dev);

	/* no early-rx interrupts */
	RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);

	/* make sure RxTx has started */
	tmp = RTL_R8 (ChipCmd);
	if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
		RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16 (IntrMask, rtl8139_intr_mask);
}
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
rtl8139_init_ring(struct net_device * dev)1447*4882a593Smuzhiyun static void rtl8139_init_ring (struct net_device *dev)
1448*4882a593Smuzhiyun {
1449*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
1450*4882a593Smuzhiyun 	int i;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	tp->cur_rx = 0;
1453*4882a593Smuzhiyun 	tp->cur_tx = 0;
1454*4882a593Smuzhiyun 	tp->dirty_tx = 0;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	for (i = 0; i < NUM_TX_DESC; i++)
1457*4882a593Smuzhiyun 		tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
1458*4882a593Smuzhiyun }
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 
/* Delay (in jiffies) before the next rtl8139_thread run.  This must be
 * global for the CONFIG_8139TOO_TUNE_TWISTER case: the twister state
 * machine shortens it to HZ/10 between tuning steps, while the normal
 * media poll stretches it back to HZ * 60. */
static int next_tick = 3 * HZ;
1463*4882a593Smuzhiyun 
#ifndef CONFIG_8139TOO_TUNE_TWISTER
/* Twister tuning disabled at build time: empty stub. */
static inline void rtl8139_tune_twister (struct net_device *dev,
				  struct rtl8139_private *tp) {}
#else
/* Magic values for the undocumented PARA78/PARA7c tuning registers. */
enum TwisterParamVals {
	PARA78_default	= 0x78fa8388,
	PARA7c_default	= 0xcb38de43,	/* param[0][3] */
	PARA7c_xxx	= 0xcb38de43,
};

/* PARA7c tuning sequences, indexed by [twist_row][twist_col].  The row
 * is chosen from the CSCR link-status bits (a rough cable-length probe);
 * the four column values are written out one per 100 msec. */
static const unsigned long param[4][4] = {
	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
};

static void rtl8139_tune_twister (struct net_device *dev,
				  struct rtl8139_private *tp)
{
	int linkcase;
	void __iomem *ioaddr = tp->mmio_addr;

	/* This is a complicated state machine to configure the "twister" for
	   impedance/echos based on the cable length.
	   All of this is magic and undocumented.

	   State lives in tp->twistie:
	     0 = idle, 1 = waiting for link beat, 2 = classify echo delay,
	     3 = write out the four row parameters, 4 = long-cable mistune
	     check, 5 = retune for a shorter cable.
	 */
	switch (tp->twistie) {
	case 1:
		if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
			/* We have link beat, let us tune the twister. */
			RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
			tp->twistie = 2;	/* Change to state 2. */
			next_tick = HZ / 10;
		} else {
			/* Just put in some reasonable defaults for when beat returns. */
			RTL_W16 (CSCR, CSCR_LinkDownCmd);
			RTL_W32 (FIFOTMS, 0x20);	/* Turn on cable test mode. */
			RTL_W32 (PARA78, PARA78_default);
			RTL_W32 (PARA7c, PARA7c_default);
			tp->twistie = 0;	/* Bail from future actions. */
		}
		break;
	case 2:
		/* Read how long it took to hear the echo. */
		linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
		if (linkcase == 0x7000)
			tp->twist_row = 3;
		else if (linkcase == 0x3000)
			tp->twist_row = 2;
		else if (linkcase == 0x1000)
			tp->twist_row = 1;
		else
			tp->twist_row = 0;
		tp->twist_col = 0;
		tp->twistie = 3;	/* Change to state 3. */
		next_tick = HZ / 10;
		break;
	case 3:
		/* Put out four tuning parameters, one per 100msec. */
		if (tp->twist_col == 0)
			RTL_W16 (FIFOTMS, 0);
		RTL_W32 (PARA7c, param[(int) tp->twist_row]
			 [(int) tp->twist_col]);
		next_tick = HZ / 10;
		if (++tp->twist_col >= 4) {
			/* For short cables we are done.
			   For long cables (row == 3) check for mistune. */
			tp->twistie =
			    (tp->twist_row == 3) ? 4 : 0;
		}
		break;
	case 4:
		/* Special case for long cables: check for mistune. */
		if ((RTL_R16 (CSCR) &
		     CSCR_LinkStatusBits) == 0x7000) {
			tp->twistie = 0;
			break;
		} else {
			RTL_W32 (PARA7c, 0xfb38de03);
			tp->twistie = 5;
			next_tick = HZ / 10;
		}
		break;
	case 5:
		/* Retune for shorter cable (column 2). */
		RTL_W32 (FIFOTMS, 0x20);
		RTL_W32 (PARA78, PARA78_default);
		RTL_W32 (PARA7c, PARA7c_default);
		RTL_W32 (FIFOTMS, 0x00);
		tp->twist_row = 2;
		tp->twist_col = 0;
		tp->twistie = 3;
		next_tick = HZ / 10;
		break;

	default:
		/* do nothing */
		break;
	}
}
#endif /* CONFIG_8139TOO_TUNE_TWISTER */
1566*4882a593Smuzhiyun 
/* Periodic worker body: re-derive full/half duplex from the MII link
 * partner ability word, run the twister tuner, and dump debug state.
 * Also stretches next_tick to a one-minute poll (the tuner may shorten
 * it again). */
static inline void rtl8139_thread_iter (struct net_device *dev,
				 struct rtl8139_private *tp,
				 void __iomem *ioaddr)
{
	int mii_lpa;

	mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);

	/* 0xffff: the MDIO read returned all-ones (no usable answer). */
	if (!tp->mii.force_media && mii_lpa != 0xffff) {
		/* Full duplex if the partner advertises 100FULL, or 10FULL
		 * with neither 100 mode: mask 0x01C0 covers the 10FULL /
		 * 100HALF / 100FULL ability bits, 0x0040 is 10FULL alone. */
		int duplex = ((mii_lpa & LPA_100FULL) ||
			      (mii_lpa & 0x01C0) == 0x0040);
		if (tp->mii.full_duplex != duplex) {
			tp->mii.full_duplex = duplex;

			if (mii_lpa) {
				netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
					    tp->mii.full_duplex ? "full" : "half",
					    tp->phys[0], mii_lpa);
			} else {
				netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
			}
#if 0
			RTL_W8 (Cfg9346, Cfg9346_Unlock);
			RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
			RTL_W8 (Cfg9346, Cfg9346_Lock);
#endif
		}
	}

	/* Slow the poll back down; rtl8139_tune_twister() may override. */
	next_tick = HZ * 60;

	rtl8139_tune_twister (dev, tp);

	netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
		   RTL_R16(NWayLPAR));
	netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
		   RTL_R16(IntrMask), RTL_R16(IntrStatus));
	netdev_dbg(dev, "Chip config %02x %02x\n",
		   RTL_R8(Config0), RTL_R8(Config1));
}
1607*4882a593Smuzhiyun 
rtl8139_thread(struct work_struct * work)1608*4882a593Smuzhiyun static void rtl8139_thread (struct work_struct *work)
1609*4882a593Smuzhiyun {
1610*4882a593Smuzhiyun 	struct rtl8139_private *tp =
1611*4882a593Smuzhiyun 		container_of(work, struct rtl8139_private, thread.work);
1612*4882a593Smuzhiyun 	struct net_device *dev = tp->mii.dev;
1613*4882a593Smuzhiyun 	unsigned long thr_delay = next_tick;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	rtnl_lock();
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	if (!netif_running(dev))
1618*4882a593Smuzhiyun 		goto out_unlock;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	if (tp->watchdog_fired) {
1621*4882a593Smuzhiyun 		tp->watchdog_fired = 0;
1622*4882a593Smuzhiyun 		rtl8139_tx_timeout_task(work);
1623*4882a593Smuzhiyun 	} else
1624*4882a593Smuzhiyun 		rtl8139_thread_iter(dev, tp, tp->mmio_addr);
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	if (tp->have_thread)
1627*4882a593Smuzhiyun 		schedule_delayed_work(&tp->thread, thr_delay);
1628*4882a593Smuzhiyun out_unlock:
1629*4882a593Smuzhiyun 	rtnl_unlock ();
1630*4882a593Smuzhiyun }
1631*4882a593Smuzhiyun 
rtl8139_start_thread(struct rtl8139_private * tp)1632*4882a593Smuzhiyun static void rtl8139_start_thread(struct rtl8139_private *tp)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	tp->twistie = 0;
1635*4882a593Smuzhiyun 	if (tp->chipset == CH_8139_K)
1636*4882a593Smuzhiyun 		tp->twistie = 1;
1637*4882a593Smuzhiyun 	else if (tp->drv_flags & HAS_LNK_CHNG)
1638*4882a593Smuzhiyun 		return;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	tp->have_thread = 1;
1641*4882a593Smuzhiyun 	tp->watchdog_fired = 0;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	schedule_delayed_work(&tp->thread, next_tick);
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
rtl8139_tx_clear(struct rtl8139_private * tp)1646*4882a593Smuzhiyun static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	tp->cur_tx = 0;
1649*4882a593Smuzhiyun 	tp->dirty_tx = 0;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
/* Deferred Tx-timeout recovery (runs in process context from the
 * delayed-work thread): quiesce NAPI and the Tx queue, dump diagnostic
 * state, stop the transmitter, clear the Tx ring under the appropriate
 * locks, and finally restart the whole chip. */
static void rtl8139_tx_timeout_task (struct work_struct *work)
{
	struct rtl8139_private *tp =
		container_of(work, struct rtl8139_private, thread.work);
	struct net_device *dev = tp->mii.dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int i;
	u8 tmp8;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Let any in-flight RCU read-side critical sections drain before
	 * tearing down Tx state. */
	synchronize_rcu();

	netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
		   RTL_R8(ChipCmd), RTL_R16(IntrStatus),
		   RTL_R16(IntrMask), RTL_R8(MediaStatus));
	/* Emit info to figure out what went wrong. */
	netdev_dbg(dev, "Tx queue start entry %ld  dirty entry %ld\n",
		   tp->cur_tx, tp->dirty_tx);
	for (i = 0; i < NUM_TX_DESC; i++)
		netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
			   i, RTL_R32(TxStatus0 + (i * 4)),
			   i == tp->dirty_tx % NUM_TX_DESC ?
			   " (queue head)" : "");

	tp->xstats.tx_timeouts++;

	/* disable Tx ASAP, if not already (leave Rx enabled) */
	tmp8 = RTL_R8 (ChipCmd);
	if (tmp8 & CmdTxEnb)
		RTL_W8 (ChipCmd, CmdRxEnb);

	spin_lock_bh(&tp->rx_lock);
	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16 (IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	rtl8139_tx_clear (tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything */
	napi_enable(&tp->napi);
	rtl8139_hw_start(dev);
	netif_wake_queue(dev);

	spin_unlock_bh(&tp->rx_lock);
}
1702*4882a593Smuzhiyun 
rtl8139_tx_timeout(struct net_device * dev,unsigned int txqueue)1703*4882a593Smuzhiyun static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue)
1704*4882a593Smuzhiyun {
1705*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	tp->watchdog_fired = 1;
1708*4882a593Smuzhiyun 	if (!tp->have_thread) {
1709*4882a593Smuzhiyun 		INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
1710*4882a593Smuzhiyun 		schedule_delayed_work(&tp->thread, next_tick);
1711*4882a593Smuzhiyun 	}
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun 
/* ndo_start_xmit: queue one packet.  The 8139 transmits from fixed DMA
 * slots, so the frame is copied into tp->tx_buf[entry] (zero-padded to
 * ETH_ZLEN for short frames) and the skb is freed immediately.  Writing
 * the slot's TxStatus register is the doorbell that starts the DMA.
 * Always returns NETDEV_TX_OK (oversized frames are dropped, never
 * requeued). */
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
					     struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int entry;
	unsigned int len = skb->len;
	unsigned long flags;

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % NUM_TX_DESC;

	/* Note: the chip doesn't have auto-pad! */
	if (likely(len < TX_BUF_SIZE)) {
		if (len < ETH_ZLEN)
			memset(tp->tx_buf[entry], 0, ETH_ZLEN);	/* zero the pad bytes */
		skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
		dev_kfree_skb_any(skb);
	} else {
		/* Frame won't fit a slot: drop it and count it. */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&tp->lock, flags);
	/*
	 * Writing to TxStatus triggers a DMA transfer of the data
	 * copied to tp->tx_buf[entry] above. Use a memory barrier
	 * to make sure that the device sees the updated data.
	 */
	wmb();
	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
		   tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));

	tp->cur_tx++;

	/* All slots in flight: stop the queue until the Tx interrupt
	 * reclaims at least one. */
	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
		netif_stop_queue (dev);
	spin_unlock_irqrestore(&tp->lock, flags);

	netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
		  len, entry);

	return NETDEV_TX_OK;
}
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 
/* Reclaim completed Tx slots between dirty_tx and cur_tx: account
 * errors and statistics per slot, adapt the Tx FIFO threshold on
 * underruns, and wake the queue if any slot was freed.  Called from
 * interrupt context with tp->lock held (caller's responsibility --
 * NOTE(review): lock ownership not visible in this chunk, confirm at
 * the call site). */
static void rtl8139_tx_interrupt (struct net_device *dev,
				  struct rtl8139_private *tp,
				  void __iomem *ioaddr)
{
	unsigned long dirty_tx, tx_left;

	assert (dev != NULL);
	assert (ioaddr != NULL);

	dirty_tx = tp->dirty_tx;
	tx_left = tp->cur_tx - dirty_tx;
	while (tx_left > 0) {
		int entry = dirty_tx % NUM_TX_DESC;
		int txstatus;

		txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));

		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
			break;	/* It still hasn't been Txed */

		/* Note: TxCarrierLost is always asserted at 100mbps. */
		if (txstatus & (TxOutOfWindow | TxAborted)) {
			/* There was an major error, log it. */
			netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
				  txstatus);
			dev->stats.tx_errors++;
			if (txstatus & TxAborted) {
				dev->stats.tx_aborted_errors++;
				/* Clear the abort so the chip resumes Tx,
				 * and ack the error interrupt. */
				RTL_W32 (TxConfig, TxClearAbt);
				RTL_W16 (IntrStatus, TxErr);
				wmb();
			}
			if (txstatus & TxCarrierLost)
				dev->stats.tx_carrier_errors++;
			if (txstatus & TxOutOfWindow)
				dev->stats.tx_window_errors++;
		} else {
			if (txstatus & TxUnderrun) {
				/* Add 64 to the Tx FIFO threshold. */
				if (tp->tx_flag < 0x00300000)
					tp->tx_flag += 0x00020000;
				dev->stats.tx_fifo_errors++;
			}
			/* Collision count lives in bits 27:24 of TxStatus. */
			dev->stats.collisions += (txstatus >> 24) & 15;
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			/* Low 11 bits of TxStatus hold the byte count. */
			tp->tx_stats.bytes += txstatus & 0x7ff;
			u64_stats_update_end(&tp->tx_stats.syncp);
		}

		dirty_tx++;
		tx_left--;
	}

#ifndef RTL8139_NDEBUG
	if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
		netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
			   dirty_tx, tp->cur_tx);
		dirty_tx += NUM_TX_DESC;
	}
#endif /* RTL8139_NDEBUG */

	/* only wake the queue if we did work, and the queue is stopped */
	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		mb();
		netif_wake_queue (dev);
	}
}
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun /* TODO: clean this up!  Rx reset need not be this intensive */
/* TODO: clean this up!  Rx reset need not be this intensive */
/* Handle an errored Rx frame: classify the error into dev->stats, then
 * reset the receiver.  Two strategies exist: the default quick
 * toggle of CmdRxEnb, or (CONFIG_8139_OLD_RX_RESET) the full
 * RealTek-recommended stop/restart with reprogramming of all Rx
 * registers. */
static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
			    struct rtl8139_private *tp, void __iomem *ioaddr)
{
	u8 tmp8;
#ifdef CONFIG_8139_OLD_RX_RESET
	int tmp_work;
#endif

	netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
		  rx_status);
	dev->stats.rx_errors++;
	if (!(rx_status & RxStatusOK)) {
		if (rx_status & RxTooLong) {
			netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
				   rx_status);
			/* A.C.: The chip hangs here. */
		}
		if (rx_status & (RxBadSymbol | RxBadAlign))
			dev->stats.rx_frame_errors++;
		if (rx_status & (RxRunt | RxTooLong))
			dev->stats.rx_length_errors++;
		if (rx_status & RxCRCErr)
			dev->stats.rx_crc_errors++;
	} else {
		/* Status says OK yet we got here: count it as lost in ring. */
		tp->xstats.rx_lost_in_ring++;
	}

#ifndef CONFIG_8139_OLD_RX_RESET
	/* Quick reset: briefly drop CmdRxEnb, restore it, then reprogram
	 * RxConfig and rewind the software read pointer. */
	tmp8 = RTL_R8 (ChipCmd);
	RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
	RTL_W8 (ChipCmd, tmp8);
	RTL_W32 (RxConfig, tp->rx_config);
	tp->cur_rx = 0;
#else
	/* Reset the receiver, based on RealTek recommendation. (Bug?) */

	/* disable receive */
	RTL_W8_F (ChipCmd, CmdTxEnb);
	/* Poll (up to ~200us) for the receiver to actually stop. */
	tmp_work = 200;
	while (--tmp_work > 0) {
		udelay(1);
		tmp8 = RTL_R8 (ChipCmd);
		if (!(tmp8 & CmdRxEnb))
			break;
	}
	if (tmp_work <= 0)
		netdev_warn(dev, "rx stop wait too long\n");
	/* restart receive */
	tmp_work = 200;
	while (--tmp_work > 0) {
		RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
		udelay(1);
		tmp8 = RTL_R8 (ChipCmd);
		if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
			break;
	}
	if (tmp_work <= 0)
		netdev_warn(dev, "tx/rx enable wait too long\n");

	/* and reinitialize all rx related registers */
	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
	/* Must enable Tx/Rx before setting transfer thresholds! */
	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);

	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
	RTL_W32 (RxConfig, tp->rx_config);
	tp->cur_rx = 0;

	netdev_dbg(dev, "init buffer addresses\n");

	/* Lock Config[01234] and BMCR register writes */
	RTL_W8 (Cfg9346, Cfg9346_Lock);

	/* init Rx ring buffer DMA address */
	RTL_W32_F (RxBuf, tp->rx_ring_dma);

	/* A.C.: Reset the multicast list. */
	__set_rx_mode (dev);
#endif
}
1913*4882a593Smuzhiyun 
#if RX_BUF_IDX == 3
/* Copy @size bytes of frame data at @offset out of the Rx ring into the
 * skb's linear area, handling a packet that wraps past the end of the
 * RX_BUF_LEN-byte ring buffer.
 */
static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
				 u32 offset, unsigned int size)
{
	u32 tail = RX_BUF_LEN - offset;	/* bytes left before the ring wraps */

	if (size <= tail) {
		/* Contiguous case: a single copy suffices. */
		skb_copy_to_linear_data(skb, ring + offset, size);
		return;
	}

	/* Wrapped case: copy the tail piece, then the rest from the start. */
	skb_copy_to_linear_data(skb, ring + offset, tail);
	skb_copy_to_linear_data_offset(skb, tail, ring, size - tail);
}
#endif
1927*4882a593Smuzhiyun 
/* Acknowledge all pending Rx interrupt causes.  Overflow conditions are
 * folded into the rx_errors / rx_fifo_errors counters before the Rx
 * status bits are cleared in one write.
 */
static void rtl8139_isr_ack(struct rtl8139_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	/* Only the Rx-related interrupt bits matter here. */
	status = RTL_R16 (IntrStatus) & RxAckBits;

	/* Clear out errors and receive interrupts */
	if (likely(status != 0)) {
		if (unlikely(status & (RxFIFOOver | RxOverflow))) {
			tp->dev->stats.rx_errors++;
			if (status & RxFIFOOver)
				tp->dev->stats.rx_fifo_errors++;
		}
		/* Ack every Rx interrupt source at once. */
		RTL_W16_F (IntrStatus, RxAckBits);
	}
}
1945*4882a593Smuzhiyun 
/* Drain completed frames from the Rx DMA ring: copy each frame into a
 * freshly allocated skb and hand it to the network stack.
 *
 * Returns the number of packets processed (bounded by @budget), or -1
 * if the ring state looked corrupt and rtl8139_rx_err() was invoked to
 * reset the Rx process.
 */
static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
		      int budget)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int received = 0;
	unsigned char *rx_ring = tp->rx_ring;
	unsigned int cur_rx = tp->cur_rx;
	unsigned int rx_size = 0;

	netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   __func__, (u16)cur_rx,
		   RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));

	/* Loop until the NAPI budget is spent or the chip reports the
	 * ring empty (RxBufEmpty set in ChipCmd).
	 */
	while (netif_running(dev) && received < budget &&
	       (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
		u32 ring_offset = cur_rx % RX_BUF_LEN;
		u32 rx_status;
		unsigned int pkt_size;
		struct sk_buff *skb;

		/* Don't let the status-dword read below be reordered
		 * ahead of the RxBufEmpty check above.
		 */
		rmb();

		/* read size+status of next frame from DMA ring buffer */
		rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
		rx_size = rx_status >> 16;
		/* rx_size includes the 4-byte FCS; strip it unless the
		 * user asked (NETIF_F_RXFCS) to keep it.
		 */
		if (likely(!(dev->features & NETIF_F_RXFCS)))
			pkt_size = rx_size - 4;
		else
			pkt_size = rx_size;

		netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
			  __func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
		print_hex_dump(KERN_DEBUG, "Frame contents: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       &rx_ring[ring_offset], 70, true);
#endif

		/* Packet copy from FIFO still in progress.
		 * Theoretically, this should never happen
		 * since EarlyRx is disabled.
		 */
		if (unlikely(rx_size == 0xfff0)) {
			/* Give the chip ~2 jiffies to finish the copy;
			 * if still stuck, force the sanity check below
			 * to fail (rx_size = 0 < 8) and reset Rx.
			 */
			if (!tp->fifo_copy_timeout)
				tp->fifo_copy_timeout = jiffies + 2;
			else if (time_after(jiffies, tp->fifo_copy_timeout)) {
				netdev_dbg(dev, "hung FIFO. Reset\n");
				rx_size = 0;
				goto no_early_rx;
			}
			netif_dbg(tp, intr, dev, "fifo copy in progress\n");
			tp->xstats.early_rx++;
			break;
		}

no_early_rx:
		tp->fifo_copy_timeout = 0;

		/* If Rx err or invalid rx_size/rx_status received
		 * (which happens if we get lost in the ring),
		 * Rx process gets reset, so we abort any further
		 * Rx processing.
		 */
		if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
			     (rx_size < 8) ||
			     (!(rx_status & RxStatusOK)))) {
			if ((dev->features & NETIF_F_RXALL) &&
			    (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) &&
			    (rx_size >= 8) &&
			    (!(rx_status & RxStatusOK))) {
				/* Length is at least mostly OK, but pkt has
				 * error.  I'm hoping we can handle some of these
				 * errors without resetting the chip. --Ben
				 */
				dev->stats.rx_errors++;
				if (rx_status & RxCRCErr) {
					dev->stats.rx_crc_errors++;
					goto keep_pkt;
				}
				if (rx_status & RxRunt) {
					dev->stats.rx_length_errors++;
					goto keep_pkt;
				}
			}
			rtl8139_rx_err (rx_status, dev, tp, ioaddr);
			received = -1;
			goto out;
		}

keep_pkt:
		/* Malloc up new buffer, compatible with net-2e. */
		/* Omit the four octet CRC from the length. */

		skb = napi_alloc_skb(&tp->napi, pkt_size);
		if (likely(skb)) {
			/* Frame data starts 4 bytes after the status
			 * dword and may wrap around the ring's end.
			 */
#if RX_BUF_IDX == 3
			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
			skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
			skb_put (skb, pkt_size);

			skb->protocol = eth_type_trans (skb, dev);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);

			netif_receive_skb (skb);
		} else {
			dev->stats.rx_dropped++;
		}
		received++;

		/* Advance past status dword + frame, rounded up to a
		 * dword boundary, then report the read pointer to the
		 * chip (the 16-byte back-off appears to be a chip
		 * requirement — see the RTL8139 datasheet).
		 */
		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
		RTL_W16 (RxBufPtr, (u16) (cur_rx - 16));

		rtl8139_isr_ack(tp);
	}

	/* Ack Rx interrupts even when nothing was received, or when the
	 * loop stopped on an in-progress FIFO copy.
	 */
	if (unlikely(!received || rx_size == 0xfff0))
		rtl8139_isr_ack(tp);

	netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   __func__, cur_rx,
		   RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));

	tp->cur_rx = cur_rx;

	/*
	 * The receive buffer should be mostly empty.
	 * Tell NAPI to reenable the Rx irq.
	 */
	if (tp->fifo_copy_timeout)
		received = budget;

out:
	return received;
}
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 
/* Handle the uncommon interrupt causes: link changes (signalled via
 * RxUnderrun on chips flagged HAS_LNK_CHNG), Rx errors/underruns, PCS
 * timeouts and PCI bus errors.  Called from rtl8139_interrupt() with
 * tp->lock held.
 */
static void rtl8139_weird_interrupt (struct net_device *dev,
				     struct rtl8139_private *tp,
				     void __iomem *ioaddr,
				     int status, int link_changed)
{
	netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);

	assert (dev != NULL);
	assert (tp != NULL);
	assert (ioaddr != NULL);

	/* Update the error count. */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	/* RxUnderrun doubles as the link-change indication on these
	 * chips; consume it here so it isn't also counted as an error.
	 */
	if ((status & RxUnderrun) && link_changed &&
	    (tp->drv_flags & HAS_LNK_CHNG)) {
		rtl_check_media(dev, 0);
		status &= ~RxUnderrun;
	}

	if (status & (RxUnderrun | RxErr))
		dev->stats.rx_errors++;

	if (status & PCSTimeout)
		dev->stats.rx_length_errors++;
	if (status & RxUnderrun)
		dev->stats.rx_fifo_errors++;
	if (status & PCIErr) {
		u16 pci_cmd_status;
		/* Reading then writing back PCI_STATUS clears the
		 * error bits (they are write-one-to-clear in PCI).
		 */
		pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
		pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);

		netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
	}
}
2124*4882a593Smuzhiyun 
/* NAPI poll callback: drain up to @budget packets from the Rx ring.
 * If the ring empties before the budget is spent, complete NAPI and
 * unmask the chip's interrupts again.
 */
static int rtl8139_poll(struct napi_struct *napi, int budget)
{
	struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int work_done;

	spin_lock(&tp->rx_lock);
	work_done = 0;
	if (likely(RTL_R16(IntrStatus) & RxAckBits))
		work_done += rtl8139_rx(dev, tp, budget);

	if (work_done < budget) {
		unsigned long flags;

		/* tp->lock serializes the interrupt-mask write against
		 * the ISR; only re-enable interrupts if NAPI really
		 * completed (no race with a concurrent reschedule).
		 */
		spin_lock_irqsave(&tp->lock, flags);
		if (napi_complete_done(napi, work_done))
			RTL_W16_F(IntrMask, rtl8139_intr_mask);
		spin_unlock_irqrestore(&tp->lock, flags);
	}
	spin_unlock(&tp->rx_lock);

	return work_done;
}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun /* The interrupt handler does all of the Rx thread work and cleans up
2151*4882a593Smuzhiyun    after the Tx thread. */
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status, ackstat;
	int link_changed = 0; /* avoid bogus "uninit" warning */
	int handled = 0;

	spin_lock (&tp->lock);
	status = RTL_R16 (IntrStatus);

	/* shared irq? */
	if (unlikely((status & rtl8139_intr_mask) == 0))
		goto out;

	handled = 1;

	/* h/w no longer present (hotplug?) or major error, bail */
	if (unlikely(status == 0xFFFF))
		goto out;

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		RTL_W16 (IntrMask, 0);
		goto out;
	}

	/* Acknowledge all of the current interrupt sources ASAP, but
	   an first get an additional status bit from CSCR. */
	if (unlikely(status & RxUnderrun))
		link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;

	/* Rx bits are left un-acked here (NAPI acks them); TxErr is
	 * acked only after rtl8139_tx_interrupt() has run, below.
	 */
	ackstat = status & ~(RxAckBits | TxErr);
	if (ackstat)
		RTL_W16 (IntrStatus, ackstat);

	/* Receive packets are processed by poll routine.
	   If not running start it now. */
	if (status & RxAckBits){
		if (napi_schedule_prep(&tp->napi)) {
			/* Mask Rx interrupts until the poll completes. */
			RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
			__napi_schedule(&tp->napi);
		}
	}

	/* Check uncommon events with one test. */
	if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
		rtl8139_weird_interrupt (dev, tp, ioaddr,
					 status, link_changed);

	if (status & (TxOK | TxErr)) {
		rtl8139_tx_interrupt (dev, tp, ioaddr);
		if (status & TxErr)
			RTL_W16 (IntrStatus, TxErr);
	}
 out:
	spin_unlock (&tp->lock);

	netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
		   RTL_R16(IntrStatus));
	return IRQ_RETVAL(handled);
}
2215*4882a593Smuzhiyun 
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void rtl8139_poll_controller(struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	const int irq = tp->pci_dev->irq;

	/* Mask the line and run the ISR synchronously in this context. */
	disable_irq_nosync(irq);
	rtl8139_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
2231*4882a593Smuzhiyun 
/* ndo_set_mac_address: validate and program a new station address into
 * the MAC0 registers.  The write is bracketed by Cfg9346 unlock/lock,
 * matching the driver's other config-register writes.
 */
static int rtl8139_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&tp->lock);

	RTL_W8_F(Cfg9346, Cfg9346_Unlock);
	/* NOTE(review): the second u32 read covers bytes 4..7 of the
	 * 6-byte address — relies on dev_addr's backing storage being
	 * at least 8 bytes; presumably guaranteed by net_device layout.
	 */
	RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
	RTL_W8_F(Cfg9346, Cfg9346_Lock);

	spin_unlock_irq(&tp->lock);

	return 0;
}
2254*4882a593Smuzhiyun 
/* ndo_stop: quiesce the chip, free the IRQ and DMA buffers, and drop
 * the chip into low-power mode.  Reverses the work done at open time.
 */
static int rtl8139_close (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
		  RTL_R16(IntrStatus));

	spin_lock_irqsave (&tp->lock, flags);

	/* Stop the chip's Tx and Rx DMA processes. */
	RTL_W8 (ChipCmd, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	RTL_W16 (IntrMask, 0);

	/* Update the error counts. */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Safe to free the IRQ now that the chip can no longer raise one. */
	free_irq(tp->pci_dev->irq, dev);

	rtl8139_tx_clear (tp);

	/* Release the coherent DMA rings and forget the pointers. */
	dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
			  tp->rx_ring, tp->rx_ring_dma);
	dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
			  tp->tx_bufs, tp->tx_bufs_dma);
	tp->rx_ring = NULL;
	tp->tx_bufs = NULL;

	/* Green! Put the chip in low-power mode. */
	RTL_W8 (Cfg9346, Cfg9346_Unlock);

	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
		RTL_W8 (HltClk, 'H');	/* 'R' would leave the clock running. */

	return 0;
}
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun /* Get the ethtool Wake-on-LAN settings.  Assumes that wol points to
2303*4882a593Smuzhiyun    kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and
2304*4882a593Smuzhiyun    other threads or interrupts aren't messing with the 8139.  */
/* ethtool get_wol: decode the chip's Config3/Config5 wake bits into
 * ethtool WAKE_* flags.  Chips without HasLWake report no WoL support
 * (wol was pre-zeroed by the caller).
 */
static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	spin_lock_irq(&tp->lock);
	if (rtl_chip_info[tp->chipset].flags & HasLWake) {
		u8 cfg3 = RTL_R8 (Config3);
		u8 cfg5 = RTL_R8 (Config5);

		wol->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;

		wol->wolopts = 0;
		if (cfg3 & Cfg3_LinkUp)
			wol->wolopts |= WAKE_PHY;
		if (cfg3 & Cfg3_Magic)
			wol->wolopts |= WAKE_MAGIC;
		/* (KON)FIXME: See how netdev_set_wol() handles the
		   following constants.  */
		if (cfg5 & Cfg5_UWF)
			wol->wolopts |= WAKE_UCAST;
		if (cfg5 & Cfg5_MWF)
			wol->wolopts |= WAKE_MCAST;
		if (cfg5 & Cfg5_BWF)
			wol->wolopts |= WAKE_BCAST;
	}
	spin_unlock_irq(&tp->lock);
}
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun /* Set the ethtool Wake-on-LAN settings.  Return 0 or -errno.  Assumes
2337*4882a593Smuzhiyun    that wol points to kernel memory and other threads or interrupts
2338*4882a593Smuzhiyun    aren't messing with the 8139.  */
/* ethtool set_wol: translate WAKE_* flags into the chip's Config3 and
 * Config5 wake bits.  Returns -EINVAL if an unsupported flag is
 * requested (e.g. on chips lacking HasLWake).
 */
static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 support;
	u8 cfg3, cfg5;

	support = ((rtl_chip_info[tp->chipset].flags & HasLWake)
		   ? (WAKE_PHY | WAKE_MAGIC
		      | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
		   : 0);
	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&tp->lock);
	/* Config3 holds the PHY-link and magic-packet wake enables and
	 * is only writable inside a Cfg9346 unlock window.
	 */
	cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
	if (wol->wolopts & WAKE_PHY)
		cfg3 |= Cfg3_LinkUp;
	if (wol->wolopts & WAKE_MAGIC)
		cfg3 |= Cfg3_Magic;
	RTL_W8 (Cfg9346, Cfg9346_Unlock);
	RTL_W8 (Config3, cfg3);
	RTL_W8 (Cfg9346, Cfg9346_Lock);

	cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF);
	/* (KON)FIXME: These are untested.  We may have to set the
	   CRC0, Wakeup0 and LSBCRC0 registers too, but I have no
	   documentation.  */
	if (wol->wolopts & WAKE_UCAST)
		cfg5 |= Cfg5_UWF;
	if (wol->wolopts & WAKE_MCAST)
		cfg5 |= Cfg5_MWF;
	if (wol->wolopts & WAKE_BCAST)
		cfg5 |= Cfg5_BWF;
	RTL_W8 (Config5, cfg5);	/* need not unlock via Cfg9346 */
	spin_unlock_irq(&tp->lock);

	return 0;
}
2378*4882a593Smuzhiyun 
rtl8139_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)2379*4882a593Smuzhiyun static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2380*4882a593Smuzhiyun {
2381*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2382*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2383*4882a593Smuzhiyun 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2384*4882a593Smuzhiyun 	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
2385*4882a593Smuzhiyun }
2386*4882a593Smuzhiyun 
rtl8139_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)2387*4882a593Smuzhiyun static int rtl8139_get_link_ksettings(struct net_device *dev,
2388*4882a593Smuzhiyun 				      struct ethtool_link_ksettings *cmd)
2389*4882a593Smuzhiyun {
2390*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2391*4882a593Smuzhiyun 	spin_lock_irq(&tp->lock);
2392*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&tp->mii, cmd);
2393*4882a593Smuzhiyun 	spin_unlock_irq(&tp->lock);
2394*4882a593Smuzhiyun 	return 0;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun 
rtl8139_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)2397*4882a593Smuzhiyun static int rtl8139_set_link_ksettings(struct net_device *dev,
2398*4882a593Smuzhiyun 				      const struct ethtool_link_ksettings *cmd)
2399*4882a593Smuzhiyun {
2400*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2401*4882a593Smuzhiyun 	int rc;
2402*4882a593Smuzhiyun 	spin_lock_irq(&tp->lock);
2403*4882a593Smuzhiyun 	rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd);
2404*4882a593Smuzhiyun 	spin_unlock_irq(&tp->lock);
2405*4882a593Smuzhiyun 	return rc;
2406*4882a593Smuzhiyun }
2407*4882a593Smuzhiyun 
rtl8139_nway_reset(struct net_device * dev)2408*4882a593Smuzhiyun static int rtl8139_nway_reset(struct net_device *dev)
2409*4882a593Smuzhiyun {
2410*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2411*4882a593Smuzhiyun 	return mii_nway_restart(&tp->mii);
2412*4882a593Smuzhiyun }
2413*4882a593Smuzhiyun 
rtl8139_get_link(struct net_device * dev)2414*4882a593Smuzhiyun static u32 rtl8139_get_link(struct net_device *dev)
2415*4882a593Smuzhiyun {
2416*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2417*4882a593Smuzhiyun 	return mii_link_ok(&tp->mii);
2418*4882a593Smuzhiyun }
2419*4882a593Smuzhiyun 
rtl8139_get_msglevel(struct net_device * dev)2420*4882a593Smuzhiyun static u32 rtl8139_get_msglevel(struct net_device *dev)
2421*4882a593Smuzhiyun {
2422*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2423*4882a593Smuzhiyun 	return tp->msg_enable;
2424*4882a593Smuzhiyun }
2425*4882a593Smuzhiyun 
/* ethtool set_msglevel: install a new netif_msg_* debug bitmask. */
static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
{
	struct rtl8139_private *np = netdev_priv(dev);

	np->msg_enable = datum;
}
2431*4882a593Smuzhiyun 
rtl8139_get_regs_len(struct net_device * dev)2432*4882a593Smuzhiyun static int rtl8139_get_regs_len(struct net_device *dev)
2433*4882a593Smuzhiyun {
2434*4882a593Smuzhiyun 	struct rtl8139_private *tp;
2435*4882a593Smuzhiyun 	/* TODO: we are too slack to do reg dumping for pio, for now */
2436*4882a593Smuzhiyun 	if (use_io)
2437*4882a593Smuzhiyun 		return 0;
2438*4882a593Smuzhiyun 	tp = netdev_priv(dev);
2439*4882a593Smuzhiyun 	return tp->regs_len;
2440*4882a593Smuzhiyun }
2441*4882a593Smuzhiyun 
rtl8139_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * regbuf)2442*4882a593Smuzhiyun static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
2443*4882a593Smuzhiyun {
2444*4882a593Smuzhiyun 	struct rtl8139_private *tp;
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	/* TODO: we are too slack to do reg dumping for pio, for now */
2447*4882a593Smuzhiyun 	if (use_io)
2448*4882a593Smuzhiyun 		return;
2449*4882a593Smuzhiyun 	tp = netdev_priv(dev);
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	regs->version = RTL_REGS_VER;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	spin_lock_irq(&tp->lock);
2454*4882a593Smuzhiyun 	memcpy_fromio(regbuf, tp->mmio_addr, regs->len);
2455*4882a593Smuzhiyun 	spin_unlock_irq(&tp->lock);
2456*4882a593Smuzhiyun }
2457*4882a593Smuzhiyun 
rtl8139_get_sset_count(struct net_device * dev,int sset)2458*4882a593Smuzhiyun static int rtl8139_get_sset_count(struct net_device *dev, int sset)
2459*4882a593Smuzhiyun {
2460*4882a593Smuzhiyun 	switch (sset) {
2461*4882a593Smuzhiyun 	case ETH_SS_STATS:
2462*4882a593Smuzhiyun 		return RTL_NUM_STATS;
2463*4882a593Smuzhiyun 	default:
2464*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2465*4882a593Smuzhiyun 	}
2466*4882a593Smuzhiyun }
2467*4882a593Smuzhiyun 
/* ethtool get_ethtool_stats: export the driver-private counters, in the
 * same order as the names in ethtool_stats_keys.
 */
static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
{
	const struct rtl8139_private *np = netdev_priv(dev);

	data[0] = np->xstats.early_rx;
	data[1] = np->xstats.tx_buf_mapped;
	data[2] = np->xstats.tx_timeouts;
	data[3] = np->xstats.rx_lost_in_ring;
}
2477*4882a593Smuzhiyun 
/* ethtool get_strings: names for the private stats.  Only ETH_SS_STATS
 * can reach here (see rtl8139_get_sset_count), so @stringset is not
 * examined.
 */
static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}
2482*4882a593Smuzhiyun 
/* ethtool operations exported by this driver. */
static const struct ethtool_ops rtl8139_ethtool_ops = {
	.get_drvinfo		= rtl8139_get_drvinfo,
	.get_regs_len		= rtl8139_get_regs_len,
	.get_regs		= rtl8139_get_regs,
	.nway_reset		= rtl8139_nway_reset,
	.get_link		= rtl8139_get_link,
	.get_msglevel		= rtl8139_get_msglevel,
	.set_msglevel		= rtl8139_set_msglevel,
	.get_wol		= rtl8139_get_wol,
	.set_wol		= rtl8139_set_wol,
	.get_strings		= rtl8139_get_strings,
	.get_sset_count		= rtl8139_get_sset_count,
	.get_ethtool_stats	= rtl8139_get_ethtool_stats,
	.get_link_ksettings	= rtl8139_get_link_ksettings,
	.set_link_ksettings	= rtl8139_set_link_ksettings,
};
2499*4882a593Smuzhiyun 
netdev_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)2500*4882a593Smuzhiyun static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2501*4882a593Smuzhiyun {
2502*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2503*4882a593Smuzhiyun 	int rc;
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	if (!netif_running(dev))
2506*4882a593Smuzhiyun 		return -EINVAL;
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	spin_lock_irq(&tp->lock);
2509*4882a593Smuzhiyun 	rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
2510*4882a593Smuzhiyun 	spin_unlock_irq(&tp->lock);
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	return rc;
2513*4882a593Smuzhiyun }
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 
/* ndo_get_stats64: fold the chip's missed-frame counter into the
 * netdev stats, then publish the 64-bit Rx/Tx packet and byte counts
 * using the u64_stats seqcount retry protocol.
 */
static void
rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	unsigned int start;

	/* RxMissed is only readable while the device is up. */
	if (netif_running(dev)) {
		spin_lock_irqsave (&tp->lock, flags);
		dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
		RTL_W32 (RxMissed, 0);
		spin_unlock_irqrestore (&tp->lock, flags);
	}

	netdev_stats_to_stats64(stats, &dev->stats);

	/* Retry loops guarantee a consistent packets/bytes pair even if
	 * the hot path updates the counters concurrently.
	 */
	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes = tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes = tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
}
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun /* Set or clear the multicast filter for this adaptor.
2547*4882a593Smuzhiyun    This routine is not state sensitive and need not be SMP locked. */
2548*4882a593Smuzhiyun 
/* Program the receive filter (RxConfig accept bits + 64-bit multicast hash
 * in MAR0..MAR7).  Caller must hold tp->lock; see rtl8139_set_rx_mode(). */
static void __set_rx_mode (struct net_device *dev)
{
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp;

	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
		   dev->flags, RTL_R32(RxConfig));

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, hash filter wide open. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Imperfect hash: top 6 bits of the Ethernet CRC of each
		 * multicast address select one of 64 filter bits. */
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* NETIF_F_RXALL: also pass up CRC-error and runt frames. */
	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	/* We can safely update without stopping the chip. */
	tmp = rtl8139_rx_config | rx_mode;
	/* Only rewrite RxConfig when it actually changed; the cached value
	 * in tp->rx_config tracks what the chip currently holds. */
	if (tp->rx_config != tmp) {
		RTL_W32_F (RxConfig, tmp);
		tp->rx_config = tmp;
	}
	RTL_W32_F (MAR0 + 0, mc_filter[0]);
	RTL_W32_F (MAR0 + 4, mc_filter[1]);
}
2595*4882a593Smuzhiyun 
rtl8139_set_rx_mode(struct net_device * dev)2596*4882a593Smuzhiyun static void rtl8139_set_rx_mode (struct net_device *dev)
2597*4882a593Smuzhiyun {
2598*4882a593Smuzhiyun 	unsigned long flags;
2599*4882a593Smuzhiyun 	struct rtl8139_private *tp = netdev_priv(dev);
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	spin_lock_irqsave (&tp->lock, flags);
2602*4882a593Smuzhiyun 	__set_rx_mode(dev);
2603*4882a593Smuzhiyun 	spin_unlock_irqrestore (&tp->lock, flags);
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun 
/* PM suspend callback: quiesce the chip if the interface is up.  Detaches
 * the netdev (stops the stack queuing packets), then under the device lock
 * masks all interrupts, stops the Tx/Rx engines, and folds the hardware
 * RxMissed counter into the software stats before the chip loses state. */
static int __maybe_unused rtl8139_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8139_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;

	if (!netif_running (dev))
		return 0;

	netif_device_detach (dev);

	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts, stop Tx and Rx. */
	RTL_W16 (IntrMask, 0);
	RTL_W8 (ChipCmd, 0);

	/* Update the error counts. */
	dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
	RTL_W32 (RxMissed, 0);

	spin_unlock_irqrestore (&tp->lock, flags);

	return 0;
}
2632*4882a593Smuzhiyun 
rtl8139_resume(struct device * device)2633*4882a593Smuzhiyun static int __maybe_unused rtl8139_resume(struct device *device)
2634*4882a593Smuzhiyun {
2635*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(device);
2636*4882a593Smuzhiyun 
2637*4882a593Smuzhiyun 	if (!netif_running (dev))
2638*4882a593Smuzhiyun 		return 0;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	rtl8139_init_ring (dev);
2641*4882a593Smuzhiyun 	rtl8139_hw_start (dev);
2642*4882a593Smuzhiyun 	netif_device_attach (dev);
2643*4882a593Smuzhiyun 	return 0;
2644*4882a593Smuzhiyun }
2645*4882a593Smuzhiyun 
/* Bind the suspend/resume callbacks into a dev_pm_ops structure. */
static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume);
2647*4882a593Smuzhiyun 
/* PCI driver descriptor: device ID table, probe/remove, and PM hooks. */
static struct pci_driver rtl8139_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= rtl8139_pci_tbl,
	.probe		= rtl8139_init_one,
	.remove		= rtl8139_remove_one,
	.driver.pm	= &rtl8139_pm_ops,
};
2655*4882a593Smuzhiyun 
2656*4882a593Smuzhiyun 
rtl8139_init_module(void)2657*4882a593Smuzhiyun static int __init rtl8139_init_module (void)
2658*4882a593Smuzhiyun {
2659*4882a593Smuzhiyun 	/* when we're a module, we always print a version message,
2660*4882a593Smuzhiyun 	 * even if no 8139 board is found.
2661*4882a593Smuzhiyun 	 */
2662*4882a593Smuzhiyun #ifdef MODULE
2663*4882a593Smuzhiyun 	pr_info(RTL8139_DRIVER_NAME "\n");
2664*4882a593Smuzhiyun #endif
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	return pci_register_driver(&rtl8139_pci_driver);
2667*4882a593Smuzhiyun }
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit rtl8139_cleanup_module (void)
{
	pci_unregister_driver (&rtl8139_pci_driver);
}
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 
/* Standard module load/unload hooks. */
module_init(rtl8139_init_module);
module_exit(rtl8139_cleanup_module);
2678