1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5*4882a593Smuzhiyun Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6*4882a593Smuzhiyun Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9*4882a593Smuzhiyun genuine driver.
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun This software may be used and distributed according to the terms of
12*4882a593Smuzhiyun the GNU General Public License (GPL), incorporated herein by reference.
13*4882a593Smuzhiyun Drivers based on or derived from this code fall under the GPL and must
14*4882a593Smuzhiyun retain the authorship, copyright and license notice. This file is not
15*4882a593Smuzhiyun a complete program and may only be used when the entire operating
16*4882a593Smuzhiyun system is licensed under the GPL.
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun See the file COPYING in this distribution for more information.
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun */
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/interrupt.h>
25*4882a593Smuzhiyun #include <linux/module.h>
26*4882a593Smuzhiyun #include <linux/moduleparam.h>
27*4882a593Smuzhiyun #include <linux/netdevice.h>
28*4882a593Smuzhiyun #include <linux/rtnetlink.h>
29*4882a593Smuzhiyun #include <linux/etherdevice.h>
30*4882a593Smuzhiyun #include <linux/ethtool.h>
31*4882a593Smuzhiyun #include <linux/pci.h>
32*4882a593Smuzhiyun #include <linux/mii.h>
33*4882a593Smuzhiyun #include <linux/delay.h>
34*4882a593Smuzhiyun #include <linux/crc32.h>
35*4882a593Smuzhiyun #include <linux/dma-mapping.h>
36*4882a593Smuzhiyun #include <linux/slab.h>
37*4882a593Smuzhiyun #include <asm/irq.h>
38*4882a593Smuzhiyun
/* PHY address space size; 0x1f presumably acts as a "match any" wildcard
 * for PHY id / MII register in code outside this chunk — TODO confirm. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.4"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

/* Rx delivery hooks: no NAPI, frames go straight to netif_rx() and the
 * quota argument is discarded by the macro below. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

/* Descriptor ring geometry and Rx buffer sizing. */
#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8	/* size field alignment mask, see sis190_give_to_asic() */

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)
63*4882a593Smuzhiyun
/* Enhanced PHY access register bit definitions (GMIIControl register).
 * A command word is built from the req/read/write bits plus the PHY id,
 * register number and (for writes) data shifted into place. */
#define EhnMIIread	0x0000
#define EhnMIIwrite	0x0020
#define EhnMIIdataShift	16
#define EhnMIIpmdShift	6	/* 7016 only */
#define EhnMIIregShift	11
#define EhnMIIreq	0x0010
#define EhnMIInotDone	0x0010	/* busy flag; same bit position as EhnMIIreq */

/* Write/read MMIO register.  These macros require a local variable named
 * 'ioaddr' (the mapped register base) to be in scope at the call site. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Read back IntrControl to flush posted PCI writes. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
82*4882a593Smuzhiyun
/* MMIO register offsets (SIS190_REGS_SIZE = 0x80 bytes total). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
117*4882a593Smuzhiyun
/* Bit definitions for the registers above, grouped by register. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
167*4882a593Smuzhiyun
/* Hardware Tx descriptor: 16 bytes, little-endian, shared with the NIC. */
struct TxDesc {
	__le32 PSize;	/* packet size / completion status words */
	__le32 status;	/* command + OWN bit, see enum _DescStatusBit */
	__le32 addr;	/* bus address of the buffer (32-bit DMA only) */
	__le32 size;	/* buffer size; high bit marks end of ring */
};
174*4882a593Smuzhiyun
/* Hardware Rx descriptor: same 16-byte little-endian layout as TxDesc. */
struct RxDesc {
	__le32 PSize;	/* received packet size + error bits after completion */
	__le32 status;	/* OWN/INT bits, see enum _DescStatusBit */
	__le32 addr;	/* bus address of the receive buffer */
	__le32 size;	/* buffer size; high bit marks end of ring */
};
181*4882a593Smuzhiyun
/* Bit layout of the descriptor fields above, split per field and direction. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000, // RXOWN/TXOWN
	INTbit		= 0x40000000, // RXINT/TXINT
	CRCbit		= 0x00020000, // CRCOFF/CRCEN
	PADbit		= 0x00010000, // PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000, // TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,

	WND		= 0x00080000,
	TABRT		= 0x00040000,
	FIFO		= 0x00020000,
	LINK		= 0x00010000,
	ColCountMask	= 0x0000ffff,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};
240*4882a593Smuzhiyun
/* ROMInterface register bits used for EEPROM access (see sis190_read_eeprom). */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,	/* request; cleared by hw when done */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	// unused
};
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun /* EEPROM Addresses */
/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,	/* presence check word */
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03	/* first of the MAC address words */
};
258*4882a593Smuzhiyun
/* Per-board quirk flags stored in sis190_private.features. */
enum sis190_feature {
	F_HAS_RGMII	= 1,	/* RGMII (vs GMII/MII) MAC-PHY interface */
	F_PHY_88E1111	= 2,	/* Marvell 88E1111 PHY fitted */
	F_PHY_BCM5461	= 4	/* Broadcom BCM5461 PHY fitted */
};
264*4882a593Smuzhiyun
/* Per-adapter driver state, stored in netdev_priv(dev). */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped device register window */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* presumably serializes hw access — confirm against full file */
	u32 rx_buf_sz;			/* size of each Rx DMA buffer */
	u32 cur_rx;			/* next Rx descriptor to inspect (free-running index) */
	u32 cur_tx;			/* next free Tx descriptor (free-running index) */
	u32 dirty_rx;			/* first Rx slot awaiting a refilled skb */
	u32 dirty_tx;			/* first Tx slot awaiting reclaim */
	dma_addr_t rx_dma;		/* bus address of RxDescRing (allocation not shown in this chunk) */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* skb backing each Rx slot */
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* skb pending on each Tx slot */
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;			/* netif message level bitmap */
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* head of the detected-PHY list */
	u32 features;			/* F_* flags, see enum sis190_feature */
	u32 negotiated_lpa;
	enum {
		LNK_OFF,
		LNK_ON,
		LNK_AUTONEG,
	} link_status;
};
294*4882a593Smuzhiyun
/* One entry in the detected-PHY list (sis190_private.first_phy). */
struct sis190_phy {
	struct list_head list;
	int phy_id;	/* MII address of this PHY */
	u16 id[2];	/* PHY identifier words, matched against mii_chip_table */
	u16 status;
	u8  type;	/* enum sis190_phy_type */
};
302*4882a593Smuzhiyun
/* Media type of a PHY, as stored in sis190_phy.type. */
enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
309*4882a593Smuzhiyun
/* Known PHY chips: identifier words with human-readable name, media type
 * and quirk flags.  NULL name terminates the table. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];		/* PHY id words to match */
	unsigned int type;	/* enum sis190_phy_type */
	u32 feature;		/* enum sis190_feature flags to enable */
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};
325*4882a593Smuzhiyun
/* Adapter names, indexed by the driver_data value in sis190_pci_tbl. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
332*4882a593Smuzhiyun
/* Supported PCI devices; driver_data (last field) indexes sis_chip_info. */
static const struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
340*4882a593Smuzhiyun
/* Frames shorter than this are copied into a fresh skb so the (larger)
 * DMA buffer can stay mapped and be reused (see sis190_try_rx_copy). */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;	/* netif message level; -1 enables every class */
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
355*4882a593Smuzhiyun
/* Interrupt sources the driver handles; everything else stays masked. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
364*4882a593Smuzhiyun
/*
 * Issue a command word to the GMII control register and busy-wait (up to
 * ~100 ms) for the hardware to clear its busy flag.  Logs an error when
 * the command never completes; there is no way to report failure upward.
 */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int attempts = 100;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	while (attempts--) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			return;
		msleep(1);
	}

	pr_err("PHY command failed !\n");
}
382*4882a593Smuzhiyun
/* Write @val to MII register @reg of the PHY at address @phy_id. */
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	u32 cmd = EhnMIIreq | EhnMIIwrite;

	cmd |= ((u32) reg) << EhnMIIregShift;
	cmd |= ((u32) phy_id) << EhnMIIpmdShift;
	cmd |= ((u32) val) << EhnMIIdataShift;

	__mdio_cmd(ioaddr, cmd);
}
389*4882a593Smuzhiyun
/*
 * Read MII register @reg of the PHY at address @phy_id.  The result is
 * latched in the data field of GMIIControl once the command completes.
 */
static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	u32 cmd = EhnMIIreq | EhnMIIread;

	cmd |= ((u32) reg) << EhnMIIregShift;
	cmd |= ((u32) phy_id) << EhnMIIpmdShift;

	__mdio_cmd(ioaddr, cmd);

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}
397*4882a593Smuzhiyun
__mdio_write(struct net_device * dev,int phy_id,int reg,int val)398*4882a593Smuzhiyun static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
399*4882a593Smuzhiyun {
400*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun mdio_write(tp->mmio_addr, phy_id, reg, val);
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun
__mdio_read(struct net_device * dev,int phy_id,int reg)405*4882a593Smuzhiyun static int __mdio_read(struct net_device *dev, int phy_id, int reg)
406*4882a593Smuzhiyun {
407*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun return mdio_read(tp->mmio_addr, phy_id, reg);
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun
/*
 * Read an MII register twice and return the second value.  The name and
 * the double read suggest the register latches its value until read
 * (e.g. link status) — NOTE(review): confirm against the PHY datasheet.
 */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}
417*4882a593Smuzhiyun
sis190_read_eeprom(void __iomem * ioaddr,u32 reg)418*4882a593Smuzhiyun static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun u16 data = 0xffff;
421*4882a593Smuzhiyun unsigned int i;
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun if (!(SIS_R32(ROMControl) & 0x0002))
424*4882a593Smuzhiyun return 0;
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun for (i = 0; i < 200; i++) {
429*4882a593Smuzhiyun if (!(SIS_R32(ROMInterface) & EEREQ)) {
430*4882a593Smuzhiyun data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
431*4882a593Smuzhiyun break;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun msleep(1);
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun
436*4882a593Smuzhiyun return data;
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun
/* Mask every interrupt source, then acknowledge anything still pending. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	/* Read back to flush the posted PCI writes before returning. */
	SIS_PCI_COMMIT();
}
445*4882a593Smuzhiyun
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */
	/* NOTE(review): 0x1a00 is undocumented — the RxCfgDMAShift comment
	 * above hints at 0x1a being DMA configuration bits; confirm. */
	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
455*4882a593Smuzhiyun
/* Set the end-of-ring flag so the NIC wraps back to the first descriptor. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
460*4882a593Smuzhiyun
/*
 * Hand an Rx descriptor (back) to the NIC: clear the packet-size field,
 * program the buffer size (preserving the end-of-ring marker), and only
 * then transfer ownership.  The wmb() guarantees the size/PSize updates
 * are visible to the device before it sees OWNbit set.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
470*4882a593Smuzhiyun
/*
 * Attach a freshly DMA-mapped buffer to an Rx descriptor and give the
 * descriptor to the NIC.  The address must be written before
 * sis190_give_to_asic() flips the ownership bit.
 */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
477*4882a593Smuzhiyun
/*
 * Neutralize an Rx descriptor that has no buffer behind it: poison the
 * address, zero the size (keeping only the end-of-ring marker) and clear
 * ownership last, ordered by wmb() so the NIC never sees a half-cleared
 * descriptor it still owns.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);	/* poison: never a valid bus address */
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
486*4882a593Smuzhiyun
sis190_alloc_rx_skb(struct sis190_private * tp,struct RxDesc * desc)487*4882a593Smuzhiyun static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
488*4882a593Smuzhiyun struct RxDesc *desc)
489*4882a593Smuzhiyun {
490*4882a593Smuzhiyun u32 rx_buf_sz = tp->rx_buf_sz;
491*4882a593Smuzhiyun struct sk_buff *skb;
492*4882a593Smuzhiyun dma_addr_t mapping;
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
495*4882a593Smuzhiyun if (unlikely(!skb))
496*4882a593Smuzhiyun goto skb_alloc_failed;
497*4882a593Smuzhiyun mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
498*4882a593Smuzhiyun DMA_FROM_DEVICE);
499*4882a593Smuzhiyun if (dma_mapping_error(&tp->pci_dev->dev, mapping))
500*4882a593Smuzhiyun goto out;
501*4882a593Smuzhiyun sis190_map_to_asic(desc, mapping, rx_buf_sz);
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun return skb;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun out:
506*4882a593Smuzhiyun dev_kfree_skb_any(skb);
507*4882a593Smuzhiyun skb_alloc_failed:
508*4882a593Smuzhiyun sis190_make_unusable_by_asic(desc);
509*4882a593Smuzhiyun return NULL;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun
/*
 * Populate every empty Rx slot in the half-open range [start, end) with a
 * fresh buffer.  Stops early on allocation failure.  Returns how many
 * ring positions were advanced past (slots already filled count too).
 */
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur = start;

	while (cur < end) {
		unsigned int entry = cur % NUM_RX_DESC;

		if (!tp->Rx_skbuff[entry]) {
			tp->Rx_skbuff[entry] =
				sis190_alloc_rx_skb(tp, tp->RxDescRing + entry);
			if (!tp->Rx_skbuff[entry])
				break;
		}
		cur++;
	}

	return cur - start;
}
530*4882a593Smuzhiyun
sis190_try_rx_copy(struct sis190_private * tp,struct sk_buff ** sk_buff,int pkt_size,dma_addr_t addr)531*4882a593Smuzhiyun static bool sis190_try_rx_copy(struct sis190_private *tp,
532*4882a593Smuzhiyun struct sk_buff **sk_buff, int pkt_size,
533*4882a593Smuzhiyun dma_addr_t addr)
534*4882a593Smuzhiyun {
535*4882a593Smuzhiyun struct sk_buff *skb;
536*4882a593Smuzhiyun bool done = false;
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun if (pkt_size >= rx_copybreak)
539*4882a593Smuzhiyun goto out;
540*4882a593Smuzhiyun
541*4882a593Smuzhiyun skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
542*4882a593Smuzhiyun if (!skb)
543*4882a593Smuzhiyun goto out;
544*4882a593Smuzhiyun
545*4882a593Smuzhiyun dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz,
546*4882a593Smuzhiyun DMA_FROM_DEVICE);
547*4882a593Smuzhiyun skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
548*4882a593Smuzhiyun *sk_buff = skb;
549*4882a593Smuzhiyun done = true;
550*4882a593Smuzhiyun out:
551*4882a593Smuzhiyun return done;
552*4882a593Smuzhiyun }
553*4882a593Smuzhiyun
/*
 * Inspect the Rx completion status word.  Returns 0 for a good frame;
 * otherwise bumps the matching error counter (first match wins, CRC
 * taking priority) plus rx_errors, and returns -1.
 */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
	bool frame_ok = (status & CRCOK) && !(status & ErrMask);

	if (frame_ok)
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
573*4882a593Smuzhiyun
/*
 * Rx completion handler: walk descriptors the hardware has released
 * (OWNbit clear), hand good frames to the stack, recycle or replace
 * buffers, then refill the ring.  Returns the number of descriptors
 * processed.  Runs in interrupt context.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	/* sis190_rx_quota() discards its second argument, so the stale
	 * dev->quota reference below is never actually expanded. */
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* Descriptor still owned by the NIC: nothing more to reap. */
		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: recycle the buffer in place. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			/* Reported length includes the 4-byte FCS. */
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				netif_info(tp, intr, dev,
					   "(frag) status = %08x\n", status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}


			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Small frame copied out: give the DMA
				 * buffer back to the device and recycle
				 * the descriptor. */
				dma_sync_single_for_device(&pdev->dev, addr,
							   tp->rx_buf_sz,
							   DMA_FROM_DEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Hand the mapped buffer itself up the
				 * stack; the empty slot is refilled by
				 * sis190_rx_fill() below. */
				dma_unmap_single(&pdev->dev, addr,
						 tp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;

	/* Every slot empty and nothing refilled: the ring is starved. */
	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

	return count;
}
651*4882a593Smuzhiyun
sis190_unmap_tx_skb(struct pci_dev * pdev,struct sk_buff * skb,struct TxDesc * desc)652*4882a593Smuzhiyun static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
653*4882a593Smuzhiyun struct TxDesc *desc)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun unsigned int len;
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len,
660*4882a593Smuzhiyun DMA_TO_DEVICE);
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun memset(desc, 0x00, sizeof(*desc));
663*4882a593Smuzhiyun }
664*4882a593Smuzhiyun
/*
 * Inspect the Tx completion status word.  Returns 0 for a clean
 * transmission; otherwise bumps every matching error counter (the bits
 * are not mutually exclusive) plus tx_errors, and returns -1.
 */
static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask	(WND | TABRT | FIFO | LINK)

	/*
	 * Fast path: no error bits set.  Was "!unlikely(status & TxErrMask)",
	 * which negates the value but leaves the branch-prediction hint on
	 * the wrong expression; likely(!(...)) states the intended hint.
	 */
	if (likely(!(status & TxErrMask)))
		return 0;

	if (status & WND)
		stats->tx_window_errors++;
	if (status & TABRT)
		stats->tx_aborted_errors++;
	if (status & FIFO)
		stats->tx_fifo_errors++;
	if (status & LINK)
		stats->tx_carrier_errors++;

	stats->tx_errors++;

	return -1;
}
685*4882a593Smuzhiyun
/* Reclaim completed Tx descriptors, update stats, restart the queue. */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * Sample "was the queue full?" before reclaiming, so the queue is
	 * only woken by the context that actually saw it stopped.  Waking
	 * it too early would race with the xmit path (hint: think preempt
	 * and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() after cur_tx++ in sis190_start_xmit(). */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		/* Still owned by the chip: stop reclaiming here. */
		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			/* -1: hw count presumably includes the frame itself
			 * -- TODO confirm against the datasheet. */
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_consume_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Publish dirty_tx before possibly waking the xmit path. */
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun /*
732*4882a593Smuzhiyun * The interrupt handler does all of the Rx thread work and cleans up after
733*4882a593Smuzhiyun * the Tx thread.
734*4882a593Smuzhiyun */
static irqreturn_t sis190_irq(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone (e.g. hot unplug); 0: not our interrupt. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Interface went down while the interrupt was in flight. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Ack every reported source in one shot. */
	SIS_W32(IntrStatus, status);

	if (status & LinkChange) {
		/* Defer link renegotiation handling to process context. */
		netif_info(tp, intr, dev, "link change\n");
		del_timer(&tp->timer);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling stand-in for the IRQ handler (netconsole and friends). */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	const int irq = tp->pci_dev->irq;

	/* Mask the line so the real handler cannot run concurrently. */
	disable_irq(irq);
	sis190_irq(irq, dev);
	enable_irq(irq);
}
784*4882a593Smuzhiyun #endif
785*4882a593Smuzhiyun
sis190_free_rx_skb(struct sis190_private * tp,struct sk_buff ** sk_buff,struct RxDesc * desc)786*4882a593Smuzhiyun static void sis190_free_rx_skb(struct sis190_private *tp,
787*4882a593Smuzhiyun struct sk_buff **sk_buff, struct RxDesc *desc)
788*4882a593Smuzhiyun {
789*4882a593Smuzhiyun struct pci_dev *pdev = tp->pci_dev;
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
792*4882a593Smuzhiyun DMA_FROM_DEVICE);
793*4882a593Smuzhiyun dev_kfree_skb(*sk_buff);
794*4882a593Smuzhiyun *sk_buff = NULL;
795*4882a593Smuzhiyun sis190_make_unusable_by_asic(desc);
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun
sis190_rx_clear(struct sis190_private * tp)798*4882a593Smuzhiyun static void sis190_rx_clear(struct sis190_private *tp)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun unsigned int i;
801*4882a593Smuzhiyun
802*4882a593Smuzhiyun for (i = 0; i < NUM_RX_DESC; i++) {
803*4882a593Smuzhiyun if (!tp->Rx_skbuff[i])
804*4882a593Smuzhiyun continue;
805*4882a593Smuzhiyun sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun }
808*4882a593Smuzhiyun
sis190_init_ring_indexes(struct sis190_private * tp)809*4882a593Smuzhiyun static void sis190_init_ring_indexes(struct sis190_private *tp)
810*4882a593Smuzhiyun {
811*4882a593Smuzhiyun tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
812*4882a593Smuzhiyun }
813*4882a593Smuzhiyun
sis190_init_ring(struct net_device * dev)814*4882a593Smuzhiyun static int sis190_init_ring(struct net_device *dev)
815*4882a593Smuzhiyun {
816*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun sis190_init_ring_indexes(tp);
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
821*4882a593Smuzhiyun memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
824*4882a593Smuzhiyun goto err_rx_clear;
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun return 0;
829*4882a593Smuzhiyun
830*4882a593Smuzhiyun err_rx_clear:
831*4882a593Smuzhiyun sis190_rx_clear(tp);
832*4882a593Smuzhiyun return -ENOMEM;
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun
sis190_set_rx_mode(struct net_device * dev)835*4882a593Smuzhiyun static void sis190_set_rx_mode(struct net_device *dev)
836*4882a593Smuzhiyun {
837*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
838*4882a593Smuzhiyun void __iomem *ioaddr = tp->mmio_addr;
839*4882a593Smuzhiyun unsigned long flags;
840*4882a593Smuzhiyun u32 mc_filter[2]; /* Multicast hash filter */
841*4882a593Smuzhiyun u16 rx_mode;
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) {
844*4882a593Smuzhiyun rx_mode =
845*4882a593Smuzhiyun AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
846*4882a593Smuzhiyun AcceptAllPhys;
847*4882a593Smuzhiyun mc_filter[1] = mc_filter[0] = 0xffffffff;
848*4882a593Smuzhiyun } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
849*4882a593Smuzhiyun (dev->flags & IFF_ALLMULTI)) {
850*4882a593Smuzhiyun /* Too many to filter perfectly -- accept all multicasts. */
851*4882a593Smuzhiyun rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
852*4882a593Smuzhiyun mc_filter[1] = mc_filter[0] = 0xffffffff;
853*4882a593Smuzhiyun } else {
854*4882a593Smuzhiyun struct netdev_hw_addr *ha;
855*4882a593Smuzhiyun
856*4882a593Smuzhiyun rx_mode = AcceptBroadcast | AcceptMyPhys;
857*4882a593Smuzhiyun mc_filter[1] = mc_filter[0] = 0;
858*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
859*4882a593Smuzhiyun int bit_nr =
860*4882a593Smuzhiyun ether_crc(ETH_ALEN, ha->addr) & 0x3f;
861*4882a593Smuzhiyun mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
862*4882a593Smuzhiyun rx_mode |= AcceptMulticast;
863*4882a593Smuzhiyun }
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun spin_lock_irqsave(&tp->lock, flags);
867*4882a593Smuzhiyun
868*4882a593Smuzhiyun SIS_W16(RxMacControl, rx_mode | 0x2);
869*4882a593Smuzhiyun SIS_W32(RxHashTable, mc_filter[0]);
870*4882a593Smuzhiyun SIS_W32(RxHashTable + 4, mc_filter[1]);
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun spin_unlock_irqrestore(&tp->lock, flags);
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun
/* Pulse the soft-reset bit in IntrControl, then force the engines down. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);	/* assert reset */
	SIS_PCI_COMMIT();		/* flush posted write before clearing */
	SIS_W32(IntrControl, 0x0);	/* deassert */
	sis190_asic_down(ioaddr);
}
882*4882a593Smuzhiyun
/* Reset the chip, program a known configuration and start Rx/Tx. */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	/* Point the chip at the coherent descriptor rings. */
	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	/* Ack stale events and mask everything while configuring. */
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register -- TODO confirm */
	SIS_W32(RxWolCtrl, 0x0);	/* no Wake-on-LAN matching */
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
915*4882a593Smuzhiyun
/*
 * Deferred PHY work: poll autonegotiation state and program the MAC's
 * StationControl register to match the negotiated link mode.  Serialised
 * against ifup/ifdown by the RTNL lock.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset still in progress: re-poll shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
		goto out_unlock;
	}

	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
		netif_carrier_off(dev);
		netif_warn(tp, link, dev, "auto-negotiating...\n");
		tp->link_status = LNK_AUTONEG;
	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
		/* Rejoice ! */
		/* Table mapping LPA bits to StationControl speed/duplex. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		netif_info(tp, link, dev, "mii ext = %04x\n", val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
			   val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* check for gigabit speed */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			/* >>2 aligns STAT1000 ability bits with CTRL1000. */
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			/* Best mode common to link partner and ourselves. */
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		/* Keep the StationControl bits unrelated to speed/duplex. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			/* RGMII delay toggle -- TODO confirm semantics. */
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		tp->negotiated_lpa = p->val;

		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
		netif_carrier_on(dev);
		tp->link_status = LNK_ON;
	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
		tp->link_status = LNK_OFF;
	/* Re-arm the periodic PHY poll. */
	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);

out_unlock:
	rtnl_unlock();
}
1023*4882a593Smuzhiyun
sis190_phy_timer(struct timer_list * t)1024*4882a593Smuzhiyun static void sis190_phy_timer(struct timer_list *t)
1025*4882a593Smuzhiyun {
1026*4882a593Smuzhiyun struct sis190_private *tp = from_timer(tp, t, timer);
1027*4882a593Smuzhiyun struct net_device *dev = tp->dev;
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun if (likely(netif_running(dev)))
1030*4882a593Smuzhiyun schedule_work(&tp->phy_task);
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun
/* Stop the PHY poll timer and wait for a running handler to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
1039*4882a593Smuzhiyun
sis190_request_timer(struct net_device * dev)1040*4882a593Smuzhiyun static inline void sis190_request_timer(struct net_device *dev)
1041*4882a593Smuzhiyun {
1042*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1043*4882a593Smuzhiyun struct timer_list *timer = &tp->timer;
1044*4882a593Smuzhiyun
1045*4882a593Smuzhiyun timer_setup(timer, sis190_phy_timer, 0);
1046*4882a593Smuzhiyun timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1047*4882a593Smuzhiyun add_timer(timer);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
sis190_set_rxbufsize(struct sis190_private * tp,struct net_device * dev)1050*4882a593Smuzhiyun static void sis190_set_rxbufsize(struct sis190_private *tp,
1051*4882a593Smuzhiyun struct net_device *dev)
1052*4882a593Smuzhiyun {
1053*4882a593Smuzhiyun unsigned int mtu = dev->mtu;
1054*4882a593Smuzhiyun
1055*4882a593Smuzhiyun tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1056*4882a593Smuzhiyun /* RxDesc->size has a licence to kill the lower bits */
1057*4882a593Smuzhiyun if (tp->rx_buf_sz & 0x07) {
1058*4882a593Smuzhiyun tp->rx_buf_sz += 8;
1059*4882a593Smuzhiyun tp->rx_buf_sz &= RX_BUF_MASK;
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun
/*
 * net_device open: allocate both descriptor rings, fill the Rx ring,
 * start the PHY timer, request the IRQ and kick the hardware.
 * Returns 0 or a negative errno; resources are unwound on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
					    &tp->tx_dma, GFP_KERNEL);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES,
					    &tp->rx_dma, GFP_KERNEL);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

	/* Error unwind, in reverse order of acquisition. */
err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
			  tp->rx_dma);
err_free_tx_0:
	dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
			  tp->tx_dma);
	goto out;
}
1110*4882a593Smuzhiyun
sis190_tx_clear(struct sis190_private * tp)1111*4882a593Smuzhiyun static void sis190_tx_clear(struct sis190_private *tp)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun unsigned int i;
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun for (i = 0; i < NUM_TX_DESC; i++) {
1116*4882a593Smuzhiyun struct sk_buff *skb = tp->Tx_skbuff[i];
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun if (!skb)
1119*4882a593Smuzhiyun continue;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1122*4882a593Smuzhiyun tp->Tx_skbuff[i] = NULL;
1123*4882a593Smuzhiyun dev_kfree_skb(skb);
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun tp->dev->stats.tx_dropped++;
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun tp->cur_tx = tp->dirty_tx = 0;
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun
/*
 * Quiesce the device: stop the timer and the queue, shut the asic down
 * and wait until no interrupt handler or poll user can still be running,
 * then release all ring buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait out a concurrently running interrupt handler. */
		synchronize_irq(tp->pci_dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_rcu();

	/* Retry until the interrupt mask stays cleared. */
	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1159*4882a593Smuzhiyun
/* net_device stop: tear everything down in reverse of sis190_open(). */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
			  tp->tx_dma);
	dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
			  tp->rx_dma);

	/* Defensive: prevent accidental reuse of the freed rings. */
	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1179*4882a593Smuzhiyun
sis190_start_xmit(struct sk_buff * skb,struct net_device * dev)1180*4882a593Smuzhiyun static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1181*4882a593Smuzhiyun struct net_device *dev)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1184*4882a593Smuzhiyun void __iomem *ioaddr = tp->mmio_addr;
1185*4882a593Smuzhiyun u32 len, entry, dirty_tx;
1186*4882a593Smuzhiyun struct TxDesc *desc;
1187*4882a593Smuzhiyun dma_addr_t mapping;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun if (unlikely(skb->len < ETH_ZLEN)) {
1190*4882a593Smuzhiyun if (skb_padto(skb, ETH_ZLEN)) {
1191*4882a593Smuzhiyun dev->stats.tx_dropped++;
1192*4882a593Smuzhiyun goto out;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun len = ETH_ZLEN;
1195*4882a593Smuzhiyun } else {
1196*4882a593Smuzhiyun len = skb->len;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun entry = tp->cur_tx % NUM_TX_DESC;
1200*4882a593Smuzhiyun desc = tp->TxDescRing + entry;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1203*4882a593Smuzhiyun netif_stop_queue(dev);
1204*4882a593Smuzhiyun netif_err(tp, tx_err, dev,
1205*4882a593Smuzhiyun "BUG! Tx Ring full when queue awake!\n");
1206*4882a593Smuzhiyun return NETDEV_TX_BUSY;
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
1210*4882a593Smuzhiyun DMA_TO_DEVICE);
1211*4882a593Smuzhiyun if (dma_mapping_error(&tp->pci_dev->dev, mapping)) {
1212*4882a593Smuzhiyun netif_err(tp, tx_err, dev,
1213*4882a593Smuzhiyun "PCI mapping failed, dropping packet");
1214*4882a593Smuzhiyun return NETDEV_TX_BUSY;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun tp->Tx_skbuff[entry] = skb;
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun desc->PSize = cpu_to_le32(len);
1220*4882a593Smuzhiyun desc->addr = cpu_to_le32(mapping);
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun desc->size = cpu_to_le32(len);
1223*4882a593Smuzhiyun if (entry == (NUM_TX_DESC - 1))
1224*4882a593Smuzhiyun desc->size |= cpu_to_le32(RingEnd);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun wmb();
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1229*4882a593Smuzhiyun if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1230*4882a593Smuzhiyun /* Half Duplex */
1231*4882a593Smuzhiyun desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1232*4882a593Smuzhiyun if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1233*4882a593Smuzhiyun desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun tp->cur_tx++;
1237*4882a593Smuzhiyun
1238*4882a593Smuzhiyun smp_wmb();
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun dirty_tx = tp->dirty_tx;
1243*4882a593Smuzhiyun if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1244*4882a593Smuzhiyun netif_stop_queue(dev);
1245*4882a593Smuzhiyun smp_rmb();
1246*4882a593Smuzhiyun if (dirty_tx != tp->dirty_tx)
1247*4882a593Smuzhiyun netif_wake_queue(dev);
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun out:
1250*4882a593Smuzhiyun return NETDEV_TX_OK;
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun
sis190_free_phy(struct list_head * first_phy)1253*4882a593Smuzhiyun static void sis190_free_phy(struct list_head *first_phy)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun struct sis190_phy *cur, *next;
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun list_for_each_entry_safe(cur, next, first_phy, list) {
1258*4882a593Smuzhiyun kfree(cur);
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun /**
1263*4882a593Smuzhiyun * sis190_default_phy - Select default PHY for sis190 mac.
1264*4882a593Smuzhiyun * @dev: the net device to probe for
1265*4882a593Smuzhiyun *
1266*4882a593Smuzhiyun * Select first detected PHY with link as default.
 * If none has an active link, select the PHY whose type is HOME as default.
 * If no HOME PHY exists, select a LAN PHY instead.
1269*4882a593Smuzhiyun */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Electrically isolate the PHYs not selected. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		/* No linked PHY: prefer HOME, then LAN, then the first. */
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		if (netif_msg_probe(tp))
			pr_info("%s: Using transceiver at address %d as default\n",
				pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* De-isolate the chosen PHY and return its latched BMSR. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1324*4882a593Smuzhiyun
/* Register a discovered PHY and identify it against mii_chip_table. */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	/* Match on id; 0xfff0 masks off the low nibble of PHYSID2,
	 * presumably the silicon revision -- TODO confirm. */
	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		/* MIX parts: classify by 100Mbit ability (LAN vs HOME). */
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
		if (netif_msg_probe(tp))
			pr_info("%s: %s transceiver at address %d\n",
				pci_name(tp->pci_dev), p->name, phy_id);
	} else {
		phy->type = UNKNOWN;
		if (netif_msg_probe(tp))
			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
				pci_name(tp->pci_dev),
				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
	}
}
1362*4882a593Smuzhiyun
sis190_mii_probe_88e1111_fixup(struct sis190_private * tp)1363*4882a593Smuzhiyun static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun if (tp->features & F_PHY_88E1111) {
1366*4882a593Smuzhiyun void __iomem *ioaddr = tp->mmio_addr;
1367*4882a593Smuzhiyun int phy_id = tp->mii_if.phy_id;
1368*4882a593Smuzhiyun u16 reg[2][2] = {
1369*4882a593Smuzhiyun { 0x808b, 0x0ce1 },
1370*4882a593Smuzhiyun { 0x808f, 0x0c60 }
1371*4882a593Smuzhiyun }, *p;
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1376*4882a593Smuzhiyun udelay(200);
1377*4882a593Smuzhiyun mdio_write(ioaddr, phy_id, 0x14, p[1]);
1378*4882a593Smuzhiyun udelay(200);
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun
/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search for total of 32 possible mii phy addresses.
 * Identify and set current phy if found one,
 * return error if it failed to found.
 *
 * Return: 0 on success, -ENOMEM if a PHY descriptor could not be
 * allocated, -EIO if no transceiver answered at any address.
 */
static int sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			/* Undo the partially built PHY list before bailing. */
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		/*
		 * NOTE(review): arguments look swapped versus the usual
		 * list_add(new, head) convention -- confirm against how
		 * sis190_free_phy()/sis190_default_phy() traverse the list
		 * before "fixing" it.
		 */
		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		if (netif_msg_probe(tp))
			pr_info("%s: No MII transceivers found!\n",
				pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	/* Select default PHY for mac */
	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	/* Hook the generic MII library up to our MDIO accessors. */
	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}
1443*4882a593Smuzhiyun
sis190_mii_remove(struct net_device * dev)1444*4882a593Smuzhiyun static void sis190_mii_remove(struct net_device *dev)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun sis190_free_phy(&tp->first_phy);
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun
sis190_release_board(struct pci_dev * pdev)1451*4882a593Smuzhiyun static void sis190_release_board(struct pci_dev *pdev)
1452*4882a593Smuzhiyun {
1453*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
1454*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun iounmap(tp->mmio_addr);
1457*4882a593Smuzhiyun pci_release_regions(pdev);
1458*4882a593Smuzhiyun pci_disable_device(pdev);
1459*4882a593Smuzhiyun free_netdev(dev);
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun
/*
 * sis190_init_board - allocate and minimally initialise the adapter
 * @pdev: PCI device to attach to
 *
 * Allocates the net_device, enables the PCI function, validates and maps
 * BAR 0, configures 32-bit DMA and leaves the chip quiescent (interrupts
 * masked and acked, soft reset issued).
 *
 * Return: the net_device on success, an ERR_PTR() on failure.  Partial
 * initialisation is unwound through the goto chain at the bottom.
 */
static struct net_device *sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory-mapped region big enough for our regs. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp))
			pr_err("%s: region #0 is no MMIO resource\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		if (netif_msg_probe(tp))
			pr_err("%s: invalid PCI region size(s)\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: could not request regions\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* The chip only does 32-bit DMA. */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: DMA configuration failed\n",
			       pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			pr_err("%s: cannot remap MMIO, aborting\n",
			       pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->link_status = LNK_OFF;

	/* Quiesce the chip before anything else can poke it. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1550*4882a593Smuzhiyun
/*
 * sis190_tx_timeout - netdev watchdog callback for a hung transmitter
 * @dev: net device whose Tx queue timed out
 * @txqueue: index of the stalled queue (unused, single-queue device)
 *
 * Stops the transmitter, masks interrupts, drops every pending Tx
 * descriptor under the driver lock and restarts the hardware.
 */
static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
		   SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1578*4882a593Smuzhiyun
/* Bit 7 of the EEPROM/APC info byte flags an RGMII-wired transceiver. */
static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	if (reg & 0x80)
		tp->features |= F_HAS_RGMII;
}
1583*4882a593Smuzhiyun
/*
 * sis190_get_mac_addr_from_eeprom - read the MAC address from the EEPROM
 * @pdev: PCI device (used for log messages only)
 * @dev: net device whose dev_addr is filled in
 *
 * Validates the EEPROM signature, copies the station address word by
 * word and latches the RGMII/GMII wiring flag from the info word.
 *
 * Return: 0 on success, -EIO when the EEPROM looks absent or blank.
 */
static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
					   struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		if (netif_msg_probe(tp))
			pr_info("%s: Error EEPROM read %x\n",
				pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM: 3 little-endian 16-bit words. */
	for (i = 0; i < ETH_ALEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	/* The info word also tells us whether the PHY is wired RGMII. */
	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun /**
1618*4882a593Smuzhiyun * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1619*4882a593Smuzhiyun * @pdev: PCI device
1620*4882a593Smuzhiyun * @dev: network device to get address for
1621*4882a593Smuzhiyun *
1622*4882a593Smuzhiyun * SiS96x model, use APC CMOS RAM to store MAC address.
1623*4882a593Smuzhiyun * APC CMOS RAM is accessed through ISA bridge.
1624*4882a593Smuzhiyun * MAC address is read into @net_dev->dev_addr.
1625*4882a593Smuzhiyun */
sis190_get_mac_addr_from_apc(struct pci_dev * pdev,struct net_device * dev)1626*4882a593Smuzhiyun static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1627*4882a593Smuzhiyun struct net_device *dev)
1628*4882a593Smuzhiyun {
1629*4882a593Smuzhiyun static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
1630*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1631*4882a593Smuzhiyun struct pci_dev *isa_bridge;
1632*4882a593Smuzhiyun u8 reg, tmp8;
1633*4882a593Smuzhiyun unsigned int i;
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun if (netif_msg_probe(tp))
1636*4882a593Smuzhiyun pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(ids); i++) {
1639*4882a593Smuzhiyun isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1640*4882a593Smuzhiyun if (isa_bridge)
1641*4882a593Smuzhiyun break;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun if (!isa_bridge) {
1645*4882a593Smuzhiyun if (netif_msg_probe(tp))
1646*4882a593Smuzhiyun pr_info("%s: Can not find ISA bridge\n",
1647*4882a593Smuzhiyun pci_name(pdev));
1648*4882a593Smuzhiyun return -EIO;
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun
1651*4882a593Smuzhiyun /* Enable port 78h & 79h to access APC Registers. */
1652*4882a593Smuzhiyun pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1653*4882a593Smuzhiyun reg = (tmp8 & ~0x02);
1654*4882a593Smuzhiyun pci_write_config_byte(isa_bridge, 0x48, reg);
1655*4882a593Smuzhiyun udelay(50);
1656*4882a593Smuzhiyun pci_read_config_byte(isa_bridge, 0x48, ®);
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun for (i = 0; i < ETH_ALEN; i++) {
1659*4882a593Smuzhiyun outb(0x9 + i, 0x78);
1660*4882a593Smuzhiyun dev->dev_addr[i] = inb(0x79);
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun outb(0x12, 0x78);
1664*4882a593Smuzhiyun reg = inb(0x79);
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun sis190_set_rgmii(tp, reg);
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun /* Restore the value to ISA Bridge */
1669*4882a593Smuzhiyun pci_write_config_byte(isa_bridge, 0x48, tmp8);
1670*4882a593Smuzhiyun pci_dev_put(isa_bridge);
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun return 0;
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Program the station address byte by byte. */
	for (i = 0; i < ETH_ALEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original filter control bits and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1703*4882a593Smuzhiyun
sis190_get_mac_addr(struct pci_dev * pdev,struct net_device * dev)1704*4882a593Smuzhiyun static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun int rc;
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1709*4882a593Smuzhiyun if (rc < 0) {
1710*4882a593Smuzhiyun u8 reg;
1711*4882a593Smuzhiyun
1712*4882a593Smuzhiyun pci_read_config_byte(pdev, 0x73, ®);
1713*4882a593Smuzhiyun
1714*4882a593Smuzhiyun if (reg & 0x00000001)
1715*4882a593Smuzhiyun rc = sis190_get_mac_addr_from_apc(pdev, dev);
1716*4882a593Smuzhiyun }
1717*4882a593Smuzhiyun return rc;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun
/*
 * sis190_set_speed_auto - advertise all speeds and restart autoneg
 * @dev: net device whose active PHY is reconfigured
 *
 * Advertises 10/100 half+full duplex and 1000 full duplex, then resets
 * the PHY with autonegotiation enabled and restarted.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1744*4882a593Smuzhiyun
sis190_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)1745*4882a593Smuzhiyun static int sis190_get_link_ksettings(struct net_device *dev,
1746*4882a593Smuzhiyun struct ethtool_link_ksettings *cmd)
1747*4882a593Smuzhiyun {
1748*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
1751*4882a593Smuzhiyun
1752*4882a593Smuzhiyun return 0;
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun
sis190_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)1755*4882a593Smuzhiyun static int sis190_set_link_ksettings(struct net_device *dev,
1756*4882a593Smuzhiyun const struct ethtool_link_ksettings *cmd)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1759*4882a593Smuzhiyun
1760*4882a593Smuzhiyun return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
1761*4882a593Smuzhiyun }
1762*4882a593Smuzhiyun
sis190_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1763*4882a593Smuzhiyun static void sis190_get_drvinfo(struct net_device *dev,
1764*4882a593Smuzhiyun struct ethtool_drvinfo *info)
1765*4882a593Smuzhiyun {
1766*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1769*4882a593Smuzhiyun strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1770*4882a593Smuzhiyun strlcpy(info->bus_info, pci_name(tp->pci_dev),
1771*4882a593Smuzhiyun sizeof(info->bus_info));
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun
sis190_get_regs_len(struct net_device * dev)1774*4882a593Smuzhiyun static int sis190_get_regs_len(struct net_device *dev)
1775*4882a593Smuzhiyun {
1776*4882a593Smuzhiyun return SIS190_REGS_SIZE;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun
sis190_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * p)1779*4882a593Smuzhiyun static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1780*4882a593Smuzhiyun void *p)
1781*4882a593Smuzhiyun {
1782*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1783*4882a593Smuzhiyun unsigned long flags;
1784*4882a593Smuzhiyun
1785*4882a593Smuzhiyun spin_lock_irqsave(&tp->lock, flags);
1786*4882a593Smuzhiyun memcpy_fromio(p, tp->mmio_addr, regs->len);
1787*4882a593Smuzhiyun spin_unlock_irqrestore(&tp->lock, flags);
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun
sis190_nway_reset(struct net_device * dev)1790*4882a593Smuzhiyun static int sis190_nway_reset(struct net_device *dev)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun return mii_nway_restart(&tp->mii_if);
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun
sis190_get_msglevel(struct net_device * dev)1797*4882a593Smuzhiyun static u32 sis190_get_msglevel(struct net_device *dev)
1798*4882a593Smuzhiyun {
1799*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun return tp->msg_enable;
1802*4882a593Smuzhiyun }
1803*4882a593Smuzhiyun
/* ethtool: update the netif message level mask. */
static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *priv = netdev_priv(dev);

	priv->msg_enable = value;
}
1810*4882a593Smuzhiyun
/* ethtool entry points: driver info, register dump, message level and
 * link settings (the latter delegated to the generic MII library).
 */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
	.get_link_ksettings = sis190_get_link_ksettings,
	.set_link_ksettings = sis190_set_link_ksettings,
};
1822*4882a593Smuzhiyun
sis190_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)1823*4882a593Smuzhiyun static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1824*4882a593Smuzhiyun {
1825*4882a593Smuzhiyun struct sis190_private *tp = netdev_priv(dev);
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun return !netif_running(dev) ? -EINVAL :
1828*4882a593Smuzhiyun generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun
/* Set a new station address and reprogram the hardware Rx filter. */
static int sis190_mac_addr(struct net_device *dev, void *p)
{
	int rc = eth_mac_addr(dev, p);

	if (rc == 0)
		sis190_init_rxfilter(dev);

	return rc;
}
1840*4882a593Smuzhiyun
/* net_device_ops: standard netdev callbacks for the SiS190/191. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_rx_mode	= sis190_set_rx_mode,
	.ndo_set_mac_address	= sis190_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};
1854*4882a593Smuzhiyun
/*
 * sis190_init_one - PCI probe callback
 * @pdev: PCI device being probed
 * @ent: matching entry from sis190_pci_tbl
 *
 * Brings up the board, reads the MAC address, probes the MII bus and
 * registers the net device.  Failures are unwound in reverse order via
 * the labels at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int sis190_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Print the banner only once, for the first probed device. */
	if (!printed_version) {
		if (netif_msg_drv(&debug))
			pr_info(SIS190_DRIVER_NAME " loaded\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	dev->ethtool_ops = &sis190_ethtool_ops;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    pci_name(pdev),
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, pdev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
	}

	/* No carrier until the PHY reports link up. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1925*4882a593Smuzhiyun
/*
 * sis190_remove_one - PCI remove callback
 * @pdev: PCI device being removed
 *
 * Tears down in reverse probe order: drop the PHY list, make sure no
 * PHY work item is still running, unregister the netdev and release
 * the board resources.
 */
static void sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	sis190_mii_remove(dev);
	cancel_work_sync(&tp->phy_task);
	unregister_netdev(dev);
	sis190_release_board(pdev);
}
1936*4882a593Smuzhiyun
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= sis190_remove_one,
};

module_pci_driver(sis190_pci_driver);
1945