// SPDX-License-Identifier: GPL-2.0-or-later

/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
 * Copyright (C) 2004 Advanced Micro Devices
 *
 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [ tg3.c ]
 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency. [ pcnet32.c ]
 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 *

Module Name:

	amd8111e.c

Abstract:

	AMD8111 based 10/100 Ethernet Controller Driver.

Environment:

	Kernel Mode

Revision History:
	3.0.0
	   Initial Revision.
	3.0.1
	 1. Dynamic interrupt coalescing.
	 2. Removed prev_stats.
	 3. MII support.
	 4. Dynamic IPG support
	3.0.2  05/29/2003
	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
	 2. Bug fix: Fixed VLAN support failure.
	 3. Bug fix: Fixed receive interrupt coalescing bug.
	 4. Dynamic IPG support is disabled by default.
	3.0.3 06/05/2003
	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
	3.0.4 12/09/2003
	 1. Added set_mac_address routine for bonding driver support.
	 2. Tested the driver for bonding support.
	 3. Bug fix: Fixed mismatch between the actual receive buffer length
	    and the length indicated to the h/w.
	 4. Modified amd8111e_rx() routine to receive all the received packets
	    in the first interrupt.
	 5. Bug fix: Corrected rx_errors reported in the get_stats() function.
	3.0.5 03/22/2004
	 1. Added NAPI support

*/


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
#endif

#include "amd8111e.h"
#define MODULE_NAME	"amd8111e"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");

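/* Illustrative usage only (not from the original source): per the
 * MODULE_PARM_DESC strings above, forcing 100 Mbps full duplex with
 * interrupt coalescing on and dynamic IPG off for the first adapter
 * would look like:
 *
 *	modprobe amd8111e speed_duplex=4 coalesce=1 dynamic_ipg=0
 */
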
/* This function will read the PHY registers. */
static int amd8111e_read_phy(struct amd8111e_priv *lp,
			     int phy_id, int reg, u32 *val)
{
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;
	unsigned int repeat = REPEAT_CNT;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);  /* It takes 30 us to read/write data */
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
	if (reg_val & PHY_RD_ERR)
		goto err_phy_read;

	*val = reg_val & 0xffff;
	return 0;
err_phy_read:
	*val = 0;
	return -EINVAL;
}
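
/* PHY_ACCESS layout as implied by the accessors above (a summary of the
 * code, not an authoritative datasheet reference): the 5-bit PHY address
 * sits at bits 25:21, the 5-bit register number at bits 20:16, and the
 * 16-bit data in the low half. PHY_CMD_ACTIVE flags a command in flight
 * and PHY_RD_ERR a failed access.
 */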

/* This function will write into PHY registers. */
static int amd8111e_write_phy(struct amd8111e_priv *lp,
			      int phy_id, int reg, u32 val)
{
	unsigned int repeat = REPEAT_CNT;
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16) | val, mmio + PHY_ACCESS);

	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);  /* It takes 30 us to read/write the data */
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));

	if (reg_val & PHY_RD_ERR)
		goto err_phy_write;

	return 0;

err_phy_write:
	return -EINVAL;
}

/* This is the mii register read function provided to the mii interface. */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}

/* This is the mii register write function provided to the mii interface. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}

/* This function will set the PHY speed. During initialization it
 * sets the original speed to 100 full.
 */
static void amd8111e_set_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 bmcr, advert, tmp;

	/* Determine mii register values to set the speed */
	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	switch (lp->ext_phy_option) {
	default:
	case SPEED_AUTONEG: /* advertise all values */
		tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			ADVERTISE_100HALF | ADVERTISE_100FULL);
		break;
	case SPEED10_HALF:
		tmp |= ADVERTISE_10HALF;
		break;
	case SPEED10_FULL:
		tmp |= ADVERTISE_10FULL;
		break;
	case SPEED100_HALF:
		tmp |= ADVERTISE_100HALF;
		break;
	case SPEED100_FULL:
		tmp |= ADVERTISE_100FULL;
		break;
	}

	if (advert != tmp)
		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
	/* Restart auto negotiation */
	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
}

/* This function will unmap skb->data space and will free
 * all transmit and receive skbuffs.
 */
static int amd8111e_free_skbs(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *rx_skbuff;
	int i;

	/* Freeing transmit skbs */
	for (i = 0; i < NUM_TX_BUFFERS; i++) {
		if (lp->tx_skbuff[i]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[i],
					 lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
			lp->tx_dma_addr[i] = 0;
		}
	}
	/* Freeing previously allocated receive buffers */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff != NULL) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->rx_dma_addr[i],
					 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_skbuff[i]);
			lp->rx_skbuff[i] = NULL;
			lp->rx_dma_addr[i] = 0;
		}
	}

	return 0;
}

/* This will set the receive buffer length corresponding
 * to the mtu size of the network interface.
 */
static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int mtu = dev->mtu;

	if (mtu > ETH_DATA_LEN) {
		/* MTU + ethernet header + FCS
		 * + optional VLAN tag + skb reserve space 2
		 */
		lp->rx_buff_len = mtu + ETH_HLEN + 10;
		lp->options |= OPTION_JUMBO_ENABLE;
	} else {
		lp->rx_buff_len = PKT_BUFF_SZ;
		lp->options &= ~OPTION_JUMBO_ENABLE;
	}
}
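
/* Worked example: with a jumbo MTU of 9000, rx_buff_len becomes
 * 9000 + ETH_HLEN (14) + 10 = 9024 bytes, where the extra 10 bytes
 * cover the 4-byte FCS, an optional 4-byte VLAN tag and the 2-byte
 * skb_reserve() alignment applied when the buffers are allocated.
 */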

/* This function will free all the previously allocated buffers,
 * determine the new receive buffer length and allocate new receive buffers.
 * This function also allocates and initializes both the transmit
 * and receive hardware descriptors.
 */
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;


	if (lp->opened)
		/* Free previously allocated transmit and receive skbs */
		amd8111e_free_skbs(dev);

	else {
		/* allocate the tx and rx descriptors */
		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->tx_ring)
			goto err_no_mem;

		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->rx_ring)
			goto err_free_tx_ring;
	}

	/* Set new receive buff size */
	amd8111e_set_rx_buff_len(dev);

	/* Allocating receive skbs */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {

		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			/* Release previously allocated skbs */
			for (--i; i >= 0; i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		skb_reserve(lp->rx_skbuff[i], 2);
	}
	/* Initializing receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    lp->rx_skbuff[i]->data,
						    lp->rx_buff_len - 2,
						    DMA_FROM_DEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len - 2);
		wmb();
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	/* Initializing transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			  lp->rx_ring, lp->rx_ring_dma_addr);

err_free_tx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			  lp->tx_ring, lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}
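
/* Descriptor ownership follows the usual producer/consumer handshake:
 * the driver fills buff_phy_addr and buff_count, issues wmb() so those
 * stores are visible before the hand-off, and only then sets OWN_BIT to
 * pass the descriptor to the MAC. The MAC clears OWN_BIT when done,
 * which is what amd8111e_rx_poll() and amd8111e_tx() test for.
 */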

/* This function will set the interrupt coalescing according
 * to the input arguments.
 */
static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
{
	unsigned int timeout;
	unsigned int event_count;

	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;

	switch (cmod) {
	case RX_INTR_COAL:
		timeout = coal_conf->rx_timeout;
		event_count = coal_conf->rx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_A_R0 | (event_count << 16) | timeout,
		       mmio + DLY_INT_A);
		break;

	case TX_INTR_COAL:
		timeout = coal_conf->tx_timeout;
		event_count = coal_conf->tx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_B_T0 | (event_count << 16) | timeout,
		       mmio + DLY_INT_B);
		break;

	case DISABLE_COAL:
		writel(0, mmio + STVAL);
		writel(STINTEN, mmio + INTEN0);
		writel(0, mmio + DLY_INT_B);
		writel(0, mmio + DLY_INT_A);
		break;
	case ENABLE_COAL:
		/* Start the timer */
		writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
		writel(VAL0 | STINTEN, mmio + INTEN0);
		break;
	default:
		break;
	}
	return 0;
}
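
/* The DLY_INT_A/DLY_INT_B writes above pack the event count into bits
 * 16 and up and the timeout (pre-scaled by DELAY_TIMER_CONV) into the
 * low bits, letting the MAC delay the interrupt until enough events
 * have accumulated or the timeout expires.
 */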

/* This function initializes the device registers and starts the device. */
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	/* enable the port manager and set auto negotiation always */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	amd8111e_set_ext_phy(dev);

	/* set control registers */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	/* enable interrupt */
	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
	       APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
	       SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	writew((u16)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	/* set default IPG to 96 */
	writew((u16)DEFAULT_IPG, mmio + IPG);
	writew((u16)(DEFAULT_IPG - IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2 | JUMBO, mmio + CMD3);
		/* Reset REX_UFLO */
		writel(REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	/* Setting the MAC address to the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	/* Enable interrupt coalesce */
	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		netdev_info(dev, "Interrupt Coalescing Enabled.\n");
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	/* set RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(mmio + CMD0);
	return 0;
}
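
/* The trailing readl() above (and in the helpers below) is the standard
 * mitigation for PCI write posting: a read from the same device forces
 * any posted MMIO writes to reach the chip before the function returns.
 */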

/* This function clears the necessary device registers. */
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;


	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* AUTOPOLL0 Register */ /*TBD default value is 8100 in FPS */
	writew(0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);

	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* Clear CMD0 */
	writel(CMD0_CLEAR, mmio + CMD0);

	/* Clear CMD2 */
	writel(CMD2_CLEAR, mmio + CMD2);

	/* Clear CMD7 */
	writel(CMD7_CLEAR, mmio + CMD7);

	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);

	/* Clear INT0 - write 1 to clear register */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* Clear STVAL */
	writel(0x0, mmio + STVAL);

	/* Clear INTEN0 */
	writel(INTEN0_CLEAR, mmio + INTEN0);

	/* Clear LADRF */
	writel(0x0, mmio + LADRF);

	/* Set SRAM_SIZE & SRAM_BOUNDARY registers */
	writel(0x80010, mmio + SRAM_SIZE);

	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);

	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);

	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* Clear MIB */
	writew(MIB_CLEAR, mmio + MIB_ADDR);

	/* Clear LADRF */
	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	/* SRAM_SIZE register */
	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	/* Set default value to CTRL1 Register */
	writel(CTRL1_DEFAULT, mmio + CTRL1);

	/* To avoid PCI posting bug */
	readl(mmio + CMD2);
}

/* This function disables the interrupt and clears all the pending
 * interrupts in INT0.
 */
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 intr0;

	/* Disable interrupt */
	writel(INTREN, lp->mmio + CMD0);

	/* Clear INT0 */
	intr0 = readl(lp->mmio + INT0);
	writel(intr0, lp->mmio + INT0);

	/* To avoid PCI posting bug */
	readl(lp->mmio + INT0);
}

/* This function stops the chip. */
static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
	writel(RUN, lp->mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(lp->mmio + CMD0);
}

/* This function frees the transmit and receive descriptor rings. */
static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
	/* Free transmit and receive descriptor rings */
	if (lp->rx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
				  lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
				  lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}
}

/* This function will free all the transmit skbs that are actually
 * transmitted by the device. It will check the ownership of the
 * skb before freeing the skb.
 */
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	int status;
	/* Complete all the transmit packets */
	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* We must free the original skb */
		if (lp->tx_skbuff[tx_index]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;
		/* COAL update tx coalescing parameters */
		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		if (netif_queue_stopped(dev) &&
		    lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			/* The ring is no longer full, clear tbusy. */
			/* lp->tx_full = 0; */
			netif_wake_queue(dev);
		}
	}
	return 0;
}
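
/* Note on the index arithmetic: tx_idx and tx_complete_idx are
 * free-running counters; only the masked value
 * (counter & TX_RING_DR_MOD_MASK) selects a descriptor. The wake-up
 * test above therefore compares the raw counters to detect when at
 * least a couple of descriptors have drained from a full ring.
 */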

/* This function handles the driver receive operation in polling mode */
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if (status & OWN_BIT)
			break;

		/* There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: even with
		 * full-sized buffers it's possible for a
		 * jabber packet to use two buffers, with only
		 * the last correctly noting the error.
		 */
		if (status & ERR_BIT) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* check for STP and ENP */
		if (!((status & STP_BIT) && (status & ENP_BIT))) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/* MAC will strip vlan tag */
		if (vtag != 0)
			min_pkt_len = MIN_PKT_LEN - 4;
		else
#endif
			min_pkt_len = MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fails,
			 * ignore that pkt and go to the next one
			 */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}

		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
		skb_put(skb, pkt_len);
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
							   new_skb->data,
							   lp->rx_buff_len - 2,
							   DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		if (vtag == TT_VLAN_TAGGED) {
			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
		napi_gro_receive(napi, skb);
		/* COAL update rx coalescing parameters */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		num_rx_pkt++;

err_next_pkt:
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len - 2);
		wmb();
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

		/* Receive descriptor is empty now */
		spin_lock_irqsave(&lp->lock, flags);
		writel(VAL0 | RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return num_rx_pkt;
}
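
/* NAPI flow in brief: the interrupt handler masks RINTEN0 and schedules
 * this poll routine, which consumes up to 'budget' descriptors per call.
 * Only when the ring drains below the budget does napi_complete_done()
 * succeed, after which receive interrupts are re-enabled and RDMD0 asks
 * the MAC to resume receive descriptor polling.
 */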

/* This function will indicate the link status to the kernel. */
static int amd8111e_link_change(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int status0, speed;

	/* read the link change */
	status0 = readl(lp->mmio + STAT0);

	if (status0 & LINK_STATS) {
		if (status0 & AUTONEG_COMPLETE)
			lp->link_config.autoneg = AUTONEG_ENABLE;
		else
			lp->link_config.autoneg = AUTONEG_DISABLE;

		if (status0 & FULL_DPLX)
			lp->link_config.duplex = DUPLEX_FULL;
		else
			lp->link_config.duplex = DUPLEX_HALF;
		speed = (status0 & SPEED_MASK) >> 7;
		if (speed == PHY_SPEED_10)
			lp->link_config.speed = SPEED_10;
		else if (speed == PHY_SPEED_100)
			lp->link_config.speed = SPEED_100;

		netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
			    (lp->link_config.speed == SPEED_100) ?
							"100" : "10",
			    (lp->link_config.duplex == DUPLEX_FULL) ?
							"Full" : "Half");

		netif_carrier_on(dev);
	} else {
		lp->link_config.speed = SPEED_INVALID;
		lp->link_config.duplex = DUPLEX_INVALID;
		lp->link_config.autoneg = AUTONEG_INVALID;
		netdev_info(dev, "Link is Down.\n");
		netif_carrier_off(dev);
	}

	return 0;
}

/* This function reads the mib counters. */
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int status;
	unsigned int data;
	unsigned int repeat = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		status = readw(mmio + MIB_ADDR);
		udelay(2);	/* controller takes MAX 2 us to get mib data */
	} while (--repeat && (status & MIB_CMD_ACTIVE));

	data = readl(mmio + MIB_DATA);
	return data;
}
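
/* The MIB access is an indirect read: the counter index is latched into
 * MIB_ADDR together with MIB_RD_CMD, the loop spins (bounded by
 * REPEAT_CNT) until MIB_CMD_ACTIVE clears, and the 32-bit result is
 * then available in MIB_DATA.
 */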

/* This function reads the mib registers and returns the hardware statistics.
 * It updates previous internal driver statistics with new values.
 */
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	if (!lp->opened)
		return new_stats;
	spin_lock_irqsave(&lp->lock, flags);

	/* stats.rx_packets */
	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts) +
				amd8111e_read_mib(mmio, rcv_multicast_pkts) +
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	/* stats.tx_packets */
	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	/* stats.rx_bytes */
	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	/* stats.tx_bytes */
	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	/* stats.rx_errors */
	/* hw errors + errors driver reported */
	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts) +
				amd8111e_read_mib(mmio, rcv_fragments) +
				amd8111e_read_mib(mmio, rcv_jabbers) +
				amd8111e_read_mib(mmio, rcv_alignment_errors) +
				amd8111e_read_mib(mmio, rcv_fcs_errors) +
				amd8111e_read_mib(mmio, rcv_miss_pkts) +
				lp->drv_rx_errors;

	/* stats.tx_errors */
	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.rx_dropped */
	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_dropped */
	new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.multicast */
	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	/* stats.collisions */
	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	/* stats.rx_length_errors */
	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts) +
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	/* stats.rx_over_errors */
	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_crc_errors */
	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	/* stats.rx_frame_errors */
	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_aborted_errors */
	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	/* stats.tx_carrier_errors */
	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	/* stats.tx_fifo_errors */
	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.tx_window_errors */
	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	/* Reset the mibs for collecting new statistics */
	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}

/* This function recalculates the interrupt coalescing mode on every
 * interrupt according to the data rate and the packet rate.
 */
static int amd8111e_calc_coalesce(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
	int tx_pkt_rate;
	int rx_pkt_rate;
	int tx_data_rate;
	int rx_data_rate;
	int rx_pkt_size;
	int tx_pkt_size;

	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
	coal_conf->tx_prev_packets = coal_conf->tx_packets;

	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
	coal_conf->tx_prev_bytes = coal_conf->tx_bytes;

	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
	coal_conf->rx_prev_packets = coal_conf->rx_packets;

	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
	coal_conf->rx_prev_bytes = coal_conf->rx_bytes;

	if (rx_pkt_rate < 800) {
		if (coal_conf->rx_coal_type != NO_COALESCE) {
			coal_conf->rx_timeout = 0x0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev, RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
		}
	} else {
		rx_pkt_size = rx_data_rate / rx_pkt_rate;
		if (rx_pkt_size < 128) {
			if (coal_conf->rx_coal_type != NO_COALESCE) {
				coal_conf->rx_timeout = 0;
				coal_conf->rx_event_count = 0;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = NO_COALESCE;
			}
		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
			if (coal_conf->rx_coal_type != LOW_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = LOW_COALESCE;
			}
		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = MEDIUM_COALESCE;
			}
		} else if (rx_pkt_size >= 1024) {
			if (coal_conf->rx_coal_type != HIGH_COALESCE) {
				coal_conf->rx_timeout = 2;
				coal_conf->rx_event_count = 3;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = HIGH_COALESCE;
			}
		}
	}
	/* NOW FOR TX INTR COALESC */
	if (tx_pkt_rate < 800) {
		if (coal_conf->tx_coal_type != NO_COALESCE) {
			coal_conf->tx_timeout = 0x0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev, TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
		}
	} else {
		tx_pkt_size = tx_data_rate / tx_pkt_rate;
		if (tx_pkt_size < 128) {
			if (coal_conf->tx_coal_type != NO_COALESCE) {
				coal_conf->tx_timeout = 0;
				coal_conf->tx_event_count = 0;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = NO_COALESCE;
			}
		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
			if (coal_conf->tx_coal_type != LOW_COALESCE) {
				coal_conf->tx_timeout = 1;
				coal_conf->tx_event_count = 2;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = LOW_COALESCE;
			}
		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
				coal_conf->tx_timeout = 2;
				coal_conf->tx_event_count = 5;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = MEDIUM_COALESCE;
			}
		} else if (tx_pkt_size >= 1024) {
			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
				coal_conf->tx_timeout = 4;
				coal_conf->tx_event_count = 8;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = HIGH_COALESCE;
			}
		}
	}
	return 0;
}
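
/* Summary of the policy above, sampled once per soft-timer tick:
 * below 800 packets per interval coalescing is turned off; otherwise
 * the average packet size selects (timeout, event_count) as
 *   RX: <128 -> (0,0), 128-511 -> (1,4), 512-1023 -> (1,4), >=1024 -> (2,3)
 *   TX: <128 -> (0,0), 128-511 -> (1,2), 512-1023 -> (2,5), >=1024 -> (4,8)
 */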
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun /* This is device interrupt function. It handles transmit,
1083*4882a593Smuzhiyun  * receive,link change and hardware timer interrupts.
1084*4882a593Smuzhiyun  */
amd8111e_interrupt(int irq,void * dev_id)1085*4882a593Smuzhiyun static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1086*4882a593Smuzhiyun {
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)dev_id;
1089*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1090*4882a593Smuzhiyun 	void __iomem *mmio = lp->mmio;
1091*4882a593Smuzhiyun 	unsigned int intr0, intren0;
1092*4882a593Smuzhiyun 	unsigned int handled = 1;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	if(unlikely(dev == NULL))
1095*4882a593Smuzhiyun 		return IRQ_NONE;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	spin_lock(&lp->lock);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	/* disabling interrupt */
1100*4882a593Smuzhiyun 	writel(INTREN, mmio + CMD0);
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	/* Read interrupt status */
1103*4882a593Smuzhiyun 	intr0 = readl(mmio + INT0);
1104*4882a593Smuzhiyun 	intren0 = readl(mmio + INTEN0);
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	/* Process all the INT event until INTR bit is clear. */
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	if (!(intr0 & INTR)){
1109*4882a593Smuzhiyun 		handled = 0;
1110*4882a593Smuzhiyun 		goto err_no_interrupt;
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1114*4882a593Smuzhiyun 	writel(intr0, mmio + INT0);
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	/* Check if Receive Interrupt has occurred. */
1117*4882a593Smuzhiyun 	if (intr0 & RINT0) {
1118*4882a593Smuzhiyun 		if (napi_schedule_prep(&lp->napi)) {
1119*4882a593Smuzhiyun 			/* Disable receive interupts */
1120*4882a593Smuzhiyun 			writel(RINTEN0, mmio + INTEN0);
1121*4882a593Smuzhiyun 			/* Schedule a polling routine */
1122*4882a593Smuzhiyun 			__napi_schedule(&lp->napi);
1123*4882a593Smuzhiyun 		} else if (intren0 & RINTEN0) {
1124*4882a593Smuzhiyun 			netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
1125*4882a593Smuzhiyun 			/* Fix by disable receive interrupts */
1126*4882a593Smuzhiyun 			writel(RINTEN0, mmio + INTEN0);
1127*4882a593Smuzhiyun 		}
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	/* Check if Transmit Interrupt has occurred. */
1131*4882a593Smuzhiyun 	if (intr0 & TINT0)
1132*4882a593Smuzhiyun 		amd8111e_tx(dev);
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	/* Check if Link Change Interrupt has occurred. */
1135*4882a593Smuzhiyun 	if (intr0 & LCINT)
1136*4882a593Smuzhiyun 		amd8111e_link_change(dev);
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	/* Check if Hardware Timer Interrupt has occurred. */
1139*4882a593Smuzhiyun 	if (intr0 & STINT)
1140*4882a593Smuzhiyun 		amd8111e_calc_coalesce(dev);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun err_no_interrupt:
1143*4882a593Smuzhiyun 	writel(VAL0 | INTREN, mmio + CMD0);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	spin_unlock(&lp->lock);
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	return IRQ_RETVAL(handled);
1148*4882a593Smuzhiyun }
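/* A minimal sketch of the interrupt bracket used above, assuming the
 * CMD0 convention of this chip (a control bit is set only when written
 * together with its VAL "valid" companion bit, and cleared otherwise):
 *
 *	writel(INTREN, mmio + CMD0);		// no VAL0 -> INTREN cleared
 *	...handle RINT0 / TINT0 / LCINT / STINT...
 *	writel(VAL0 | INTREN, mmio + CMD0);	// VAL0 set -> INTREN set
 */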
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1151*4882a593Smuzhiyun static void amd8111e_poll(struct net_device *dev)
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun 	unsigned long flags;
1154*4882a593Smuzhiyun 	local_irq_save(flags);
1155*4882a593Smuzhiyun 	amd8111e_interrupt(0, dev);
1156*4882a593Smuzhiyun 	local_irq_restore(flags);
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun #endif
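/* Note: amd8111e_poll() above is the netpoll entry point; facilities
 * such as netconsole use it to run the interrupt handler with local
 * IRQs disabled when normal interrupt delivery cannot be relied upon.
 */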
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun /* This function closes the network interface and updates
1162*4882a593Smuzhiyun  * the statistics so that most recent statistics will be
1163*4882a593Smuzhiyun  * available after the interface is down.
1164*4882a593Smuzhiyun  */
1165*4882a593Smuzhiyun static int amd8111e_close(struct net_device *dev)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1168*4882a593Smuzhiyun 	netif_stop_queue(dev);
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	napi_disable(&lp->napi);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	amd8111e_disable_interrupt(lp);
1175*4882a593Smuzhiyun 	amd8111e_stop_chip(lp);
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	/* Free transmit and receive skbs */
1178*4882a593Smuzhiyun 	amd8111e_free_skbs(lp->amd8111e_net_dev);
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	netif_carrier_off(lp->amd8111e_net_dev);
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	/* Delete ipg timer */
1183*4882a593Smuzhiyun 	if (lp->options & OPTION_DYN_IPG_ENABLE)
1184*4882a593Smuzhiyun 		del_timer_sync(&lp->ipg_data.ipg_timer);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1187*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1188*4882a593Smuzhiyun 	amd8111e_free_ring(lp);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	/* Update the statistics before closing */
1191*4882a593Smuzhiyun 	amd8111e_get_stats(dev);
1192*4882a593Smuzhiyun 	lp->opened = 0;
1193*4882a593Smuzhiyun 	return 0;
1194*4882a593Smuzhiyun }
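/* Teardown order used by amd8111e_close() above, shown as a sketch:
 *
 *	netif_stop_queue()	-> no new transmits are queued
 *	napi_disable()		-> the rx poll loop is quiesced
 *	irq off + stop chip	-> the hardware raises no new events
 *	free skbs/irq/rings	-> resources freed only once idle
 *
 * The statistics are read back just before returning so they remain
 * available while the interface is down.
 */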
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun /* This function opens a new interface. It requests the IRQ for the device,
1197*4882a593Smuzhiyun  * initializes the device, buffers and descriptors, and starts the device.
1198*4882a593Smuzhiyun  */
1199*4882a593Smuzhiyun static int amd8111e_open(struct net_device *dev)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1204*4882a593Smuzhiyun 					 dev->name, dev))
1205*4882a593Smuzhiyun 		return -EAGAIN;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	napi_enable(&lp->napi);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	amd8111e_init_hw_default(lp);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	if (amd8111e_restart(dev)) {
1214*4882a593Smuzhiyun 		spin_unlock_irq(&lp->lock);
1215*4882a593Smuzhiyun 		napi_disable(&lp->napi);
1216*4882a593Smuzhiyun 		if (dev->irq)
1217*4882a593Smuzhiyun 			free_irq(dev->irq, dev);
1218*4882a593Smuzhiyun 		return -ENOMEM;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 	/* Start ipg timer */
1221*4882a593Smuzhiyun 	if (lp->options & OPTION_DYN_IPG_ENABLE) {
1222*4882a593Smuzhiyun 		add_timer(&lp->ipg_data.ipg_timer);
1223*4882a593Smuzhiyun 		netdev_info(dev, "Dynamic IPG Enabled\n");
1224*4882a593Smuzhiyun 	}
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	lp->opened = 1;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	netif_start_queue(dev);
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	return 0;
1233*4882a593Smuzhiyun }
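/* Note on the error path above: when amd8111e_restart() fails, open
 * undoes only its own steps (napi_enable() and request_irq()) before
 * returning -ENOMEM, so the stack never calls close() on a device that
 * did not finish opening.
 */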
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun /* This function checks whether any transmit descriptors are
1236*4882a593Smuzhiyun  * available to queue more packets.
1237*4882a593Smuzhiyun  */
1238*4882a593Smuzhiyun static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun 	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1241*4882a593Smuzhiyun 	if (lp->tx_skbuff[tx_index])
1242*4882a593Smuzhiyun 		return -1;
1243*4882a593Smuzhiyun 	else
1244*4882a593Smuzhiyun 		return 0;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun }
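/* Usage note: a tx_skbuff[] slot that still holds an skb means the
 * corresponding descriptor has not completed yet, so the ring is full
 * at tx_idx. amd8111e_start_xmit() below relies on this check and
 * calls netif_stop_queue() when it fails.
 */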
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun /* This function will queue the transmit packets to the
1249*4882a593Smuzhiyun  * descriptors and will trigger the send operation. It also
1250*4882a593Smuzhiyun  * initializes the transmit descriptors with buffer physical address,
1251*4882a593Smuzhiyun  * byte count, ownership to hardware etc.
1252*4882a593Smuzhiyun  */
1253*4882a593Smuzhiyun static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1254*4882a593Smuzhiyun 				       struct net_device *dev)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1257*4882a593Smuzhiyun 	int tx_index;
1258*4882a593Smuzhiyun 	unsigned long flags;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	spin_lock_irqsave(&lp->lock, flags);
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	lp->tx_skbuff[tx_index] = skb;
1267*4882a593Smuzhiyun 	lp->tx_ring[tx_index].tx_flags = 0;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun #if AMD8111E_VLAN_TAG_USED
1270*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
1271*4882a593Smuzhiyun 		lp->tx_ring[tx_index].tag_ctrl_cmd |=
1272*4882a593Smuzhiyun 				cpu_to_le16(TCC_VLAN_INSERT);
1273*4882a593Smuzhiyun 		lp->tx_ring[tx_index].tag_ctrl_info =
1274*4882a593Smuzhiyun 				cpu_to_le16(skb_vlan_tag_get(skb));
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	}
1277*4882a593Smuzhiyun #endif
1278*4882a593Smuzhiyun 	lp->tx_dma_addr[tx_index] =
1279*4882a593Smuzhiyun 	    dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
1280*4882a593Smuzhiyun 			   DMA_TO_DEVICE);
1281*4882a593Smuzhiyun 	lp->tx_ring[tx_index].buff_phy_addr =
1282*4882a593Smuzhiyun 	    cpu_to_le32(lp->tx_dma_addr[tx_index]);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	/* Set ownership (OWN), start/end of packet, FCS and LTINT bits */
1285*4882a593Smuzhiyun 	wmb();
1286*4882a593Smuzhiyun 	lp->tx_ring[tx_index].tx_flags |=
1287*4882a593Smuzhiyun 	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	lp->tx_idx++;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	/* Trigger an immediate send poll. */
1292*4882a593Smuzhiyun 	writel(VAL1 | TDMD0, lp->mmio + CMD0);
1293*4882a593Smuzhiyun 	writel(VAL2 | RDMD0, lp->mmio + CMD0);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	if (amd8111e_tx_queue_avail(lp) < 0) {
1296*4882a593Smuzhiyun 		netif_stop_queue(dev);
1297*4882a593Smuzhiyun 	}
1298*4882a593Smuzhiyun 	spin_unlock_irqrestore(&lp->lock, flags);
1299*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1300*4882a593Smuzhiyun }
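/* Descriptor handoff sketch (mirroring amd8111e_start_xmit() above):
 * the buffer address and length must be globally visible before the
 * device can observe OWN_BIT, which is what the wmb() enforces:
 *
 *	ring->buff_count    = cpu_to_le16(skb->len);
 *	ring->buff_phy_addr = cpu_to_le32(dma_addr);
 *	wmb();				// payload fields before ownership
 *	ring->tx_flags     |= cpu_to_le16(OWN_BIT | ...);
 */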
1301*4882a593Smuzhiyun /* This function returns all the memory mapped registers of the device. */
1302*4882a593Smuzhiyun static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	void __iomem *mmio = lp->mmio;
1305*4882a593Smuzhiyun 	/* Read only necessary registers */
1306*4882a593Smuzhiyun 	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1307*4882a593Smuzhiyun 	buf[1] = readl(mmio + XMT_RING_LEN0);
1308*4882a593Smuzhiyun 	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1309*4882a593Smuzhiyun 	buf[3] = readl(mmio + RCV_RING_LEN0);
1310*4882a593Smuzhiyun 	buf[4] = readl(mmio + CMD0);
1311*4882a593Smuzhiyun 	buf[5] = readl(mmio + CMD2);
1312*4882a593Smuzhiyun 	buf[6] = readl(mmio + CMD3);
1313*4882a593Smuzhiyun 	buf[7] = readl(mmio + CMD7);
1314*4882a593Smuzhiyun 	buf[8] = readl(mmio + INT0);
1315*4882a593Smuzhiyun 	buf[9] = readl(mmio + INTEN0);
1316*4882a593Smuzhiyun 	buf[10] = readl(mmio + LADRF);
1317*4882a593Smuzhiyun 	buf[11] = readl(mmio + LADRF+4);
1318*4882a593Smuzhiyun 	buf[12] = readl(mmio + STAT0);
1319*4882a593Smuzhiyun }
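/* The 13 words read above must match amd8111e_get_regs_len(), which
 * returns AMD8111E_REG_DUMP_LEN; the ethtool core sizes the user
 * buffer from that value before calling amd8111e_get_regs() below.
 */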
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun /* This function sets promiscuous mode, all-multi mode, or the multicast address
1323*4882a593Smuzhiyun  * list to the device.
1324*4882a593Smuzhiyun  */
1325*4882a593Smuzhiyun static void amd8111e_set_multicast_list(struct net_device *dev)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
1328*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1329*4882a593Smuzhiyun 	u32 mc_filter[2];
1330*4882a593Smuzhiyun 	int bit_num;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC) {
1333*4882a593Smuzhiyun 		writel(VAL2 | PROM, lp->mmio + CMD2);
1334*4882a593Smuzhiyun 		return;
1335*4882a593Smuzhiyun 	}
1336*4882a593Smuzhiyun 	else
1337*4882a593Smuzhiyun 		writel(PROM, lp->mmio + CMD2);
1338*4882a593Smuzhiyun 	if (dev->flags & IFF_ALLMULTI ||
1339*4882a593Smuzhiyun 	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1340*4882a593Smuzhiyun 		/* get all multicast packet */
1341*4882a593Smuzhiyun 		mc_filter[1] = mc_filter[0] = 0xffffffff;
1342*4882a593Smuzhiyun 		lp->options |= OPTION_MULTICAST_ENABLE;
1343*4882a593Smuzhiyun 		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
1344*4882a593Smuzhiyun 		return;
1345*4882a593Smuzhiyun 	}
1346*4882a593Smuzhiyun 	if (netdev_mc_empty(dev)) {
1347*4882a593Smuzhiyun 		/* get only own packets */
1348*4882a593Smuzhiyun 		mc_filter[1] = mc_filter[0] = 0;
1349*4882a593Smuzhiyun 		lp->options &= ~OPTION_MULTICAST_ENABLE;
1350*4882a593Smuzhiyun 		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
1351*4882a593Smuzhiyun 		/* disable promiscuous mode */
1352*4882a593Smuzhiyun 		writel(PROM, lp->mmio + CMD2);
1353*4882a593Smuzhiyun 		return;
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 	/* load all the multicast addresses in the logic filter */
1356*4882a593Smuzhiyun 	lp->options |= OPTION_MULTICAST_ENABLE;
1357*4882a593Smuzhiyun 	mc_filter[1] = mc_filter[0] = 0;
1358*4882a593Smuzhiyun 	netdev_for_each_mc_addr(ha, dev) {
1359*4882a593Smuzhiyun 		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1360*4882a593Smuzhiyun 		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	/* To eliminate PCI posting bug */
1365*4882a593Smuzhiyun 	readl(lp->mmio + CMD2);
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun }
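/* Worked example of the logical-address-filter hash above (the address
 * is illustrative): for a multicast address such as 01:00:5e:00:00:01,
 *
 *	bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;
 *
 * yields one of 64 bit positions; bit_num >> 5 selects the 32-bit
 * filter word and 1 << (bit_num & 31) the bit within it. The hash can
 * alias different addresses onto the same bit, so the filter accepts a
 * superset and exact filtering happens in software.
 */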
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun static void amd8111e_get_drvinfo(struct net_device *dev,
1370*4882a593Smuzhiyun 				 struct ethtool_drvinfo *info)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1373*4882a593Smuzhiyun 	struct pci_dev *pci_dev = lp->pci_dev;
1374*4882a593Smuzhiyun 	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
1375*4882a593Smuzhiyun 	snprintf(info->fw_version, sizeof(info->fw_version),
1376*4882a593Smuzhiyun 		"%u", chip_version);
1377*4882a593Smuzhiyun 	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun static int amd8111e_get_regs_len(struct net_device *dev)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	return AMD8111E_REG_DUMP_LEN;
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1388*4882a593Smuzhiyun 	regs->version = 0;
1389*4882a593Smuzhiyun 	amd8111e_read_regs(lp, buf);
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun static int amd8111e_get_link_ksettings(struct net_device *dev,
1393*4882a593Smuzhiyun 				       struct ethtool_link_ksettings *cmd)
1394*4882a593Smuzhiyun {
1395*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1396*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1397*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
1398*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1399*4882a593Smuzhiyun 	return 0;
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun static int amd8111e_set_link_ksettings(struct net_device *dev,
1403*4882a593Smuzhiyun 				       const struct ethtool_link_ksettings *cmd)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1406*4882a593Smuzhiyun 	int res;
1407*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1408*4882a593Smuzhiyun 	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
1409*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1410*4882a593Smuzhiyun 	return res;
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun static int amd8111e_nway_reset(struct net_device *dev)
1414*4882a593Smuzhiyun {
1415*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1416*4882a593Smuzhiyun 	return mii_nway_restart(&lp->mii_if);
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun static u32 amd8111e_get_link(struct net_device *dev)
1420*4882a593Smuzhiyun {
1421*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1422*4882a593Smuzhiyun 	return mii_link_ok(&lp->mii_if);
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1428*4882a593Smuzhiyun 	wol_info->supported = WAKE_MAGIC | WAKE_PHY;
1429*4882a593Smuzhiyun 	if (lp->options & OPTION_WOL_ENABLE)
1430*4882a593Smuzhiyun 		wol_info->wolopts = WAKE_MAGIC;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1434*4882a593Smuzhiyun {
1435*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1436*4882a593Smuzhiyun 	if (wol_info->wolopts & ~(WAKE_MAGIC | WAKE_PHY))
1437*4882a593Smuzhiyun 		return -EINVAL;
1438*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1439*4882a593Smuzhiyun 	if (wol_info->wolopts & WAKE_MAGIC)
1440*4882a593Smuzhiyun 		lp->options |=
1441*4882a593Smuzhiyun 			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1442*4882a593Smuzhiyun 	else if (wol_info->wolopts & WAKE_PHY)
1443*4882a593Smuzhiyun 		lp->options |=
1444*4882a593Smuzhiyun 			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1445*4882a593Smuzhiyun 	else
1446*4882a593Smuzhiyun 		lp->options &= ~OPTION_WOL_ENABLE;
1447*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1448*4882a593Smuzhiyun 	return 0;
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun static const struct ethtool_ops ops = {
1452*4882a593Smuzhiyun 	.get_drvinfo = amd8111e_get_drvinfo,
1453*4882a593Smuzhiyun 	.get_regs_len = amd8111e_get_regs_len,
1454*4882a593Smuzhiyun 	.get_regs = amd8111e_get_regs,
1455*4882a593Smuzhiyun 	.nway_reset = amd8111e_nway_reset,
1456*4882a593Smuzhiyun 	.get_link = amd8111e_get_link,
1457*4882a593Smuzhiyun 	.get_wol = amd8111e_get_wol,
1458*4882a593Smuzhiyun 	.set_wol = amd8111e_set_wol,
1459*4882a593Smuzhiyun 	.get_link_ksettings = amd8111e_get_link_ksettings,
1460*4882a593Smuzhiyun 	.set_link_ksettings = amd8111e_set_link_ksettings,
1461*4882a593Smuzhiyun };
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun /* The ethtool_ops above give driver info, get/set link settings, read
1464*4882a593Smuzhiyun  * registers, force autonegotiation and get/set WOL options. The ioctl
1465*4882a593Smuzhiyun  * below handles MII requests (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
1466*4882a593Smuzhiyun  */
1467*4882a593Smuzhiyun static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1468*4882a593Smuzhiyun {
1469*4882a593Smuzhiyun 	struct mii_ioctl_data *data = if_mii(ifr);
1470*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1471*4882a593Smuzhiyun 	int err;
1472*4882a593Smuzhiyun 	u32 mii_regval;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	switch (cmd) {
1475*4882a593Smuzhiyun 	case SIOCGMIIPHY:
1476*4882a593Smuzhiyun 		data->phy_id = lp->ext_phy_addr;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 		fallthrough;
1479*4882a593Smuzhiyun 	case SIOCGMIIREG:
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 		spin_lock_irq(&lp->lock);
1482*4882a593Smuzhiyun 		err = amd8111e_read_phy(lp, data->phy_id,
1483*4882a593Smuzhiyun 			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1484*4882a593Smuzhiyun 		spin_unlock_irq(&lp->lock);
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 		data->val_out = mii_regval;
1487*4882a593Smuzhiyun 		return err;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	case SIOCSMIIREG:
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 		spin_lock_irq(&lp->lock);
1492*4882a593Smuzhiyun 		err = amd8111e_write_phy(lp, data->phy_id,
1493*4882a593Smuzhiyun 			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1494*4882a593Smuzhiyun 		spin_unlock_irq(&lp->lock);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 		return err;
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	default:
1499*4882a593Smuzhiyun 		/* do nothing */
1500*4882a593Smuzhiyun 		break;
1501*4882a593Smuzhiyun 	}
1502*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1503*4882a593Smuzhiyun }
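/* Example user-space path (illustrative): utilities such as mii-tool
 * first issue SIOCGMIIPHY to learn the PHY address, then
 * SIOCGMIIREG/SIOCSMIIREG with reg_num 0..31 to read or write PHY
 * registers through the handlers above.
 */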
1504*4882a593Smuzhiyun static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1507*4882a593Smuzhiyun 	int i;
1508*4882a593Smuzhiyun 	struct sockaddr *addr = p;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1511*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1512*4882a593Smuzhiyun 	/* Setting the MAC address to the device */
1513*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++)
1514*4882a593Smuzhiyun 		writeb(dev->dev_addr[i], lp->mmio + PADR + i);
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	return 0;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun /* This function changes the MTU of the device. It restarts the device to
1522*4882a593Smuzhiyun  * initialize the descriptors with new receive buffers.
1523*4882a593Smuzhiyun  */
1524*4882a593Smuzhiyun static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1525*4882a593Smuzhiyun {
1526*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1527*4882a593Smuzhiyun 	int err;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	if (!netif_running(dev)) {
1530*4882a593Smuzhiyun 		/* new_mtu will be used
1531*4882a593Smuzhiyun 		 * when the device starts next time
1532*4882a593Smuzhiyun 		 */
1533*4882a593Smuzhiyun 		dev->mtu = new_mtu;
1534*4882a593Smuzhiyun 		return 0;
1535*4882a593Smuzhiyun 	}
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	/* stop the chip */
1540*4882a593Smuzhiyun 	writel(RUN, lp->mmio + CMD0);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	dev->mtu = new_mtu;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	err = amd8111e_restart(dev);
1545*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1546*4882a593Smuzhiyun 	if (!err)
1547*4882a593Smuzhiyun 		netif_start_queue(dev);
1548*4882a593Smuzhiyun 	return err;
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun 	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
1554*4882a593Smuzhiyun 	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	/* To eliminate PCI posting bug */
1557*4882a593Smuzhiyun 	readl(lp->mmio + CMD7);
1558*4882a593Smuzhiyun 	return 0;
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	/* Adapter is already stopped/suspended/interrupt-disabled */
1565*4882a593Smuzhiyun 	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 	/* To eliminate PCI posting bug */
1568*4882a593Smuzhiyun 	readl(lp->mmio + CMD7);
1569*4882a593Smuzhiyun 	return 0;
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun /* This function is called when a packet transmission fails to complete
1573*4882a593Smuzhiyun  * within a reasonable period, on the assumption that an interrupt has
1574*4882a593Smuzhiyun  * failed or the interface is locked up. This function will reinitialize
1575*4882a593Smuzhiyun  * the hardware.
1576*4882a593Smuzhiyun  */
1577*4882a593Smuzhiyun static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1580*4882a593Smuzhiyun 	int err;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	netdev_err(dev, "transmit timed out, resetting\n");
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1585*4882a593Smuzhiyun 	err = amd8111e_restart(dev);
1586*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1587*4882a593Smuzhiyun 	if (!err)
1588*4882a593Smuzhiyun 		netif_wake_queue(dev);
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun static int __maybe_unused amd8111e_suspend(struct device *dev_d)
1592*4882a593Smuzhiyun {
1593*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(dev_d);
1594*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	if (!netif_running(dev))
1597*4882a593Smuzhiyun 		return 0;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	/* disable the interrupt */
1600*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1601*4882a593Smuzhiyun 	amd8111e_disable_interrupt(lp);
1602*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	netif_device_detach(dev);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	/* stop chip */
1607*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1608*4882a593Smuzhiyun 	if (lp->options & OPTION_DYN_IPG_ENABLE)
1609*4882a593Smuzhiyun 		del_timer_sync(&lp->ipg_data.ipg_timer);
1610*4882a593Smuzhiyun 	amd8111e_stop_chip(lp);
1611*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	if (lp->options & OPTION_WOL_ENABLE) {
1614*4882a593Smuzhiyun 		/* enable wol */
1615*4882a593Smuzhiyun 		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
1616*4882a593Smuzhiyun 			amd8111e_enable_magicpkt(lp);
1617*4882a593Smuzhiyun 		if (lp->options & OPTION_WAKE_PHY_ENABLE)
1618*4882a593Smuzhiyun 			amd8111e_enable_link_change(lp);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 		device_set_wakeup_enable(dev_d, 1);
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	}
1623*4882a593Smuzhiyun 	else {
1624*4882a593Smuzhiyun 		device_set_wakeup_enable(dev_d, 0);
1625*4882a593Smuzhiyun 	}
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	return 0;
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun static int __maybe_unused amd8111e_resume(struct device *dev_d)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(dev_d);
1633*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	if (!netif_running(dev))
1636*4882a593Smuzhiyun 		return 0;
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	netif_device_attach(dev);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	spin_lock_irq(&lp->lock);
1641*4882a593Smuzhiyun 	amd8111e_restart(dev);
1642*4882a593Smuzhiyun 	/* Restart ipg timer */
1643*4882a593Smuzhiyun 	if (lp->options & OPTION_DYN_IPG_ENABLE)
1644*4882a593Smuzhiyun 		mod_timer(&lp->ipg_data.ipg_timer,
1645*4882a593Smuzhiyun 				jiffies + IPG_CONVERGE_JIFFIES);
1646*4882a593Smuzhiyun 	spin_unlock_irq(&lp->lock);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	return 0;
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun static void amd8111e_config_ipg(struct timer_list *t)
1652*4882a593Smuzhiyun {
1653*4882a593Smuzhiyun 	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
1654*4882a593Smuzhiyun 	struct ipg_info *ipg_data = &lp->ipg_data;
1655*4882a593Smuzhiyun 	void __iomem *mmio = lp->mmio;
1656*4882a593Smuzhiyun 	unsigned int prev_col_cnt = ipg_data->col_cnt;
1657*4882a593Smuzhiyun 	unsigned int total_col_cnt;
1658*4882a593Smuzhiyun 	unsigned int tmp_ipg;
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	if (lp->link_config.duplex == DUPLEX_FULL) {
1661*4882a593Smuzhiyun 		ipg_data->ipg = DEFAULT_IPG;
1662*4882a593Smuzhiyun 		return;
1663*4882a593Smuzhiyun 	}
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	if (ipg_data->ipg_state == SSTATE) {
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 		if (ipg_data->timer_tick == IPG_STABLE_TIME) {
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 			ipg_data->timer_tick = 0;
1670*4882a593Smuzhiyun 			ipg_data->ipg = MIN_IPG - IPG_STEP;
1671*4882a593Smuzhiyun 			ipg_data->current_ipg = MIN_IPG;
1672*4882a593Smuzhiyun 			ipg_data->diff_col_cnt = 0xFFFFFFFF;
1673*4882a593Smuzhiyun 			ipg_data->ipg_state = CSTATE;
1674*4882a593Smuzhiyun 		}
1675*4882a593Smuzhiyun 		else
1676*4882a593Smuzhiyun 			ipg_data->timer_tick++;
1677*4882a593Smuzhiyun 	}
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	if (ipg_data->ipg_state == CSTATE) {
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 		/* Get the current collision count */
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 		total_col_cnt = ipg_data->col_cnt =
1684*4882a593Smuzhiyun 				amd8111e_read_mib(mmio, xmt_collisions);
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 		if ((total_col_cnt - prev_col_cnt) <
1687*4882a593Smuzhiyun 				(ipg_data->diff_col_cnt)) {
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 			ipg_data->diff_col_cnt =
1690*4882a593Smuzhiyun 				total_col_cnt - prev_col_cnt ;
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 			ipg_data->ipg = ipg_data->current_ipg;
1693*4882a593Smuzhiyun 		}
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 		ipg_data->current_ipg += IPG_STEP;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 		if (ipg_data->current_ipg <= MAX_IPG)
1698*4882a593Smuzhiyun 			tmp_ipg = ipg_data->current_ipg;
1699*4882a593Smuzhiyun 		else {
1700*4882a593Smuzhiyun 			tmp_ipg = ipg_data->ipg;
1701*4882a593Smuzhiyun 			ipg_data->ipg_state = SSTATE;
1702*4882a593Smuzhiyun 		}
1703*4882a593Smuzhiyun 		writew((u32)tmp_ipg, mmio + IPG);
1704*4882a593Smuzhiyun 		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1705*4882a593Smuzhiyun 	}
1706*4882a593Smuzhiyun 	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1707*4882a593Smuzhiyun 	return;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun }
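/* Sketch of the dynamic-IPG search above: in CSTATE the timer sweeps
 * current_ipg upward in IPG_STEP increments, remembering in
 * ipg_data->ipg the value that produced the fewest new collisions;
 * once current_ipg exceeds MAX_IPG, the best value is programmed and
 * the state machine rests in SSTATE for IPG_STABLE_TIME ticks before
 * probing again. The tuning only matters at half duplex, hence the
 * early return for DUPLEX_FULL.
 */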
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun static void amd8111e_probe_ext_phy(struct net_device *dev)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun 	struct amd8111e_priv *lp = netdev_priv(dev);
1714*4882a593Smuzhiyun 	int i;
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 	for (i = 0x1e; i >= 0; i--) {
1717*4882a593Smuzhiyun 		u32 id1, id2;
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1720*4882a593Smuzhiyun 			continue;
1721*4882a593Smuzhiyun 		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1722*4882a593Smuzhiyun 			continue;
1723*4882a593Smuzhiyun 		lp->ext_phy_id = (id1 << 16) | id2;
1724*4882a593Smuzhiyun 		lp->ext_phy_addr = i;
1725*4882a593Smuzhiyun 		return;
1726*4882a593Smuzhiyun 	}
1727*4882a593Smuzhiyun 	lp->ext_phy_id = 0;
1728*4882a593Smuzhiyun 	lp->ext_phy_addr = 1;
1729*4882a593Smuzhiyun }
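/* PHY ID layout used above: MII_PHYSID1 supplies bits 31..16 and
 * MII_PHYSID2 bits 15..0 of ext_phy_id, e.g. id1 = 0x0022 and
 * id2 = 0x561b give ext_phy_id = 0x0022561b (values illustrative).
 * When no PHY responds, address 1 is assumed and the ID is left 0.
 */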
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun static const struct net_device_ops amd8111e_netdev_ops = {
1732*4882a593Smuzhiyun 	.ndo_open		= amd8111e_open,
1733*4882a593Smuzhiyun 	.ndo_stop		= amd8111e_close,
1734*4882a593Smuzhiyun 	.ndo_start_xmit		= amd8111e_start_xmit,
1735*4882a593Smuzhiyun 	.ndo_tx_timeout		= amd8111e_tx_timeout,
1736*4882a593Smuzhiyun 	.ndo_get_stats		= amd8111e_get_stats,
1737*4882a593Smuzhiyun 	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
1738*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
1739*4882a593Smuzhiyun 	.ndo_set_mac_address	= amd8111e_set_mac_address,
1740*4882a593Smuzhiyun 	.ndo_do_ioctl		= amd8111e_ioctl,
1741*4882a593Smuzhiyun 	.ndo_change_mtu		= amd8111e_change_mtu,
1742*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1743*4882a593Smuzhiyun 	.ndo_poll_controller	 = amd8111e_poll,
1744*4882a593Smuzhiyun #endif
1745*4882a593Smuzhiyun };
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun static int amd8111e_probe_one(struct pci_dev *pdev,
1748*4882a593Smuzhiyun 				  const struct pci_device_id *ent)
1749*4882a593Smuzhiyun {
1750*4882a593Smuzhiyun 	int err, i;
1751*4882a593Smuzhiyun 	unsigned long reg_addr, reg_len;
1752*4882a593Smuzhiyun 	struct amd8111e_priv *lp;
1753*4882a593Smuzhiyun 	struct net_device *dev;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	err = pci_enable_device(pdev);
1756*4882a593Smuzhiyun 	if (err) {
1757*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
1758*4882a593Smuzhiyun 		return err;
1759*4882a593Smuzhiyun 	}
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1762*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot find PCI base address\n");
1763*4882a593Smuzhiyun 		err = -ENODEV;
1764*4882a593Smuzhiyun 		goto err_disable_pdev;
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	err = pci_request_regions(pdev, MODULE_NAME);
1768*4882a593Smuzhiyun 	if (err) {
1769*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
1770*4882a593Smuzhiyun 		goto err_disable_pdev;
1771*4882a593Smuzhiyun 	}
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	pci_set_master(pdev);
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	/* Find power-management capability. */
1776*4882a593Smuzhiyun 	if (!pdev->pm_cap) {
1777*4882a593Smuzhiyun 		dev_err(&pdev->dev, "No Power Management capability\n");
1778*4882a593Smuzhiyun 		err = -ENODEV;
1779*4882a593Smuzhiyun 		goto err_free_reg;
1780*4882a593Smuzhiyun 	}
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 	/* Initialize DMA */
1783*4882a593Smuzhiyun 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
1784*4882a593Smuzhiyun 		dev_err(&pdev->dev, "DMA not supported\n");
1785*4882a593Smuzhiyun 		err = -ENODEV;
1786*4882a593Smuzhiyun 		goto err_free_reg;
1787*4882a593Smuzhiyun 	}
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	reg_addr = pci_resource_start(pdev, 0);
1790*4882a593Smuzhiyun 	reg_len = pci_resource_len(pdev, 0);
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1793*4882a593Smuzhiyun 	if (!dev) {
1794*4882a593Smuzhiyun 		err = -ENOMEM;
1795*4882a593Smuzhiyun 		goto err_free_reg;
1796*4882a593Smuzhiyun 	}
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun #if AMD8111E_VLAN_TAG_USED
1801*4882a593Smuzhiyun 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1802*4882a593Smuzhiyun #endif
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	lp = netdev_priv(dev);
1805*4882a593Smuzhiyun 	lp->pci_dev = pdev;
1806*4882a593Smuzhiyun 	lp->amd8111e_net_dev = dev;
1807*4882a593Smuzhiyun 	lp->pm_cap = pdev->pm_cap;
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	spin_lock_init(&lp->lock);
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
1812*4882a593Smuzhiyun 	if (!lp->mmio) {
1813*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot map device registers\n");
1814*4882a593Smuzhiyun 		err = -ENOMEM;
1815*4882a593Smuzhiyun 		goto err_free_dev;
1816*4882a593Smuzhiyun 	}
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	/* Initializing MAC address */
1819*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++)
1820*4882a593Smuzhiyun 		dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	/* Set user-defined parameters */
1823*4882a593Smuzhiyun 	lp->ext_phy_option = speed_duplex[card_idx];
1824*4882a593Smuzhiyun 	if (coalesce[card_idx])
1825*4882a593Smuzhiyun 		lp->options |= OPTION_INTR_COAL_ENABLE;
1826*4882a593Smuzhiyun 	if (dynamic_ipg[card_idx++])
1827*4882a593Smuzhiyun 		lp->options |= OPTION_DYN_IPG_ENABLE;
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	/* Initialize driver entry points */
1831*4882a593Smuzhiyun 	dev->netdev_ops = &amd8111e_netdev_ops;
1832*4882a593Smuzhiyun 	dev->ethtool_ops = &ops;
1833*4882a593Smuzhiyun 	dev->irq = pdev->irq;
1834*4882a593Smuzhiyun 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1835*4882a593Smuzhiyun 	dev->min_mtu = AMD8111E_MIN_MTU;
1836*4882a593Smuzhiyun 	dev->max_mtu = AMD8111E_MAX_MTU;
1837*4882a593Smuzhiyun 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun #if AMD8111E_VLAN_TAG_USED
1840*4882a593Smuzhiyun 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1841*4882a593Smuzhiyun #endif
1842*4882a593Smuzhiyun 	/* Probe the external PHY */
1843*4882a593Smuzhiyun 	amd8111e_probe_ext_phy(dev);
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	/* setting mii default values */
1846*4882a593Smuzhiyun 	lp->mii_if.dev = dev;
1847*4882a593Smuzhiyun 	lp->mii_if.mdio_read = amd8111e_mdio_read;
1848*4882a593Smuzhiyun 	lp->mii_if.mdio_write = amd8111e_mdio_write;
1849*4882a593Smuzhiyun 	lp->mii_if.phy_id = lp->ext_phy_addr;
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	/* Set receive buffer length and set jumbo option*/
1852*4882a593Smuzhiyun 	amd8111e_set_rx_buff_len(dev);
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	err = register_netdev(dev);
1856*4882a593Smuzhiyun 	if (err) {
1857*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot register net device\n");
1858*4882a593Smuzhiyun 		goto err_free_dev;
1859*4882a593Smuzhiyun 	}
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	pci_set_drvdata(pdev, dev);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	/* Initialize software ipg timer */
1864*4882a593Smuzhiyun 	if (lp->options & OPTION_DYN_IPG_ENABLE) {
1865*4882a593Smuzhiyun 		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
1866*4882a593Smuzhiyun 		lp->ipg_data.ipg_timer.expires = jiffies +
1867*4882a593Smuzhiyun 						 IPG_CONVERGE_JIFFIES;
1868*4882a593Smuzhiyun 		lp->ipg_data.ipg = DEFAULT_IPG;
1869*4882a593Smuzhiyun 		lp->ipg_data.ipg_state = CSTATE;
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	/*  display driver and device information */
1873*4882a593Smuzhiyun 	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
1874*4882a593Smuzhiyun 	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1875*4882a593Smuzhiyun 		 chip_version, dev->dev_addr);
1876*4882a593Smuzhiyun 	if (lp->ext_phy_id)
1877*4882a593Smuzhiyun 		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
1878*4882a593Smuzhiyun 			 lp->ext_phy_id, lp->ext_phy_addr);
1879*4882a593Smuzhiyun 	else
1880*4882a593Smuzhiyun 		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	return 0;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun err_free_dev:
1885*4882a593Smuzhiyun 	free_netdev(dev);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun err_free_reg:
1888*4882a593Smuzhiyun 	pci_release_regions(pdev);
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun err_disable_pdev:
1891*4882a593Smuzhiyun 	pci_disable_device(pdev);
1892*4882a593Smuzhiyun 	return err;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun }
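/* Error unwinding in amd8111e_probe_one() runs in reverse order of
 * acquisition: err_free_dev frees the netdev, err_free_reg releases
 * the PCI regions, err_disable_pdev disables the device. lp->mmio
 * needs no explicit unmap because devm_ioremap() ties its lifetime to
 * &pdev->dev.
 */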
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun static void amd8111e_remove_one(struct pci_dev *pdev)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	if (dev) {
1901*4882a593Smuzhiyun 		unregister_netdev(dev);
1902*4882a593Smuzhiyun 		free_netdev(dev);
1903*4882a593Smuzhiyun 		pci_release_regions(pdev);
1904*4882a593Smuzhiyun 		pci_disable_device(pdev);
1905*4882a593Smuzhiyun 	}
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun static const struct pci_device_id amd8111e_pci_tbl[] = {
1909*4882a593Smuzhiyun 	{
1910*4882a593Smuzhiyun 	 .vendor = PCI_VENDOR_ID_AMD,
1911*4882a593Smuzhiyun 	 .device = PCI_DEVICE_ID_AMD8111E_7462,
1912*4882a593Smuzhiyun 	},
1913*4882a593Smuzhiyun 	{
1914*4882a593Smuzhiyun 	 .vendor = 0,
1915*4882a593Smuzhiyun 	}
1916*4882a593Smuzhiyun };
1917*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun static struct pci_driver amd8111e_driver = {
1922*4882a593Smuzhiyun 	.name   	= MODULE_NAME,
1923*4882a593Smuzhiyun 	.id_table	= amd8111e_pci_tbl,
1924*4882a593Smuzhiyun 	.probe		= amd8111e_probe_one,
1925*4882a593Smuzhiyun 	.remove		= amd8111e_remove_one,
1926*4882a593Smuzhiyun 	.driver.pm	= &amd8111e_pm_ops
1927*4882a593Smuzhiyun };
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun module_pci_driver(amd8111e_driver);
1930