xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/dnet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Dave DNET Ethernet Controller driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
6*4882a593Smuzhiyun  * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun #include <linux/io.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/moduleparam.h>
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/types.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/netdevice.h>
17*4882a593Smuzhiyun #include <linux/etherdevice.h>
18*4882a593Smuzhiyun #include <linux/dma-mapping.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/phy.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include "dnet.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #undef DEBUG
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun /* function for reading internal MAC register */
/* Read one internal MAC register through the indirect address/data pair. */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	/* post the register address to start the read cycle */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* internal MAC accesses are very slow; give the cycle time to
	 * complete before sampling the data register */
	ndelay(500);

	/* fetch the latched result */
	return dnet_readl(bp, MACREG_DATA);
}
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /* function for writing internal MAC register */
/* Write one internal MAC register through the indirect address/data pair. */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* stage the value first, then trigger the write cycle */
	dnet_writel(bp, val, MACREG_DATA);
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* internal MAC accesses are very slow; let the cycle finish
	 * before the caller issues the next access */
	ndelay(500);
}
58*4882a593Smuzhiyun 
__dnet_set_hwaddr(struct dnet * bp)59*4882a593Smuzhiyun static void __dnet_set_hwaddr(struct dnet *bp)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	u16 tmp;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
64*4882a593Smuzhiyun 	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
65*4882a593Smuzhiyun 	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
66*4882a593Smuzhiyun 	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
67*4882a593Smuzhiyun 	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
68*4882a593Smuzhiyun 	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
dnet_get_hwaddr(struct dnet * bp)71*4882a593Smuzhiyun static void dnet_get_hwaddr(struct dnet *bp)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun 	u16 tmp;
74*4882a593Smuzhiyun 	u8 addr[6];
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	/*
77*4882a593Smuzhiyun 	 * from MAC docs:
78*4882a593Smuzhiyun 	 * "Note that the MAC address is stored in the registers in Hexadecimal
79*4882a593Smuzhiyun 	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
80*4882a593Smuzhiyun 	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
81*4882a593Smuzhiyun 	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
82*4882a593Smuzhiyun 	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
83*4882a593Smuzhiyun 	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
84*4882a593Smuzhiyun 	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
85*4882a593Smuzhiyun 	 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
86*4882a593Smuzhiyun 	 * Mac_addr[15:0]).
87*4882a593Smuzhiyun 	 */
88*4882a593Smuzhiyun 	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
89*4882a593Smuzhiyun 	*((__be16 *)addr) = cpu_to_be16(tmp);
90*4882a593Smuzhiyun 	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
91*4882a593Smuzhiyun 	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
92*4882a593Smuzhiyun 	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
93*4882a593Smuzhiyun 	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	if (is_valid_ether_addr(addr))
96*4882a593Smuzhiyun 		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
dnet_mdio_read(struct mii_bus * bus,int mii_id,int regnum)99*4882a593Smuzhiyun static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun 	struct dnet *bp = bus->priv;
102*4882a593Smuzhiyun 	u16 value;
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
105*4882a593Smuzhiyun 				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
106*4882a593Smuzhiyun 		cpu_relax();
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	/* only 5 bits allowed for phy-addr and reg_offset */
109*4882a593Smuzhiyun 	mii_id &= 0x1f;
110*4882a593Smuzhiyun 	regnum &= 0x1f;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	/* prepare reg_value for a read */
113*4882a593Smuzhiyun 	value = (mii_id << 8);
114*4882a593Smuzhiyun 	value |= regnum;
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	/* write control word */
117*4882a593Smuzhiyun 	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	/* wait for end of transfer */
120*4882a593Smuzhiyun 	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
121*4882a593Smuzhiyun 				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
122*4882a593Smuzhiyun 		cpu_relax();
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	return value;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun 
/* MDIO bus write callback: write @value to (phy, reg).  Busy-waits on
 * the CMD_FIN flag before and after the command.  Always returns 0. */
static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 cmd;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	/* wait for any previous management transfer to finish */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* control word: write-command bit 13, 5-bit phy address in [12:8],
	 * 5-bit register offset in [4:0] */
	cmd = (1 << 13) | ((mii_id & 0x1f) << 8) | (regnum & 0x1f);

	/* stage the data first, then issue the command */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, cmd);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}
169*4882a593Smuzhiyun 
/*
 * PHY link-change callback (registered via phy_connect() in
 * dnet_mii_probe()).
 *
 * Mirrors the PHY's current duplex/speed/link state into the MAC's
 * MODE and RXTX_CONTROL registers.  The last-seen values are cached in
 * bp->duplex/bp->speed/bp->link so the registers are only rewritten
 * (and a message logged) when something actually changed.
 */
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	/* bp->lock serializes MAC register access with the irq handler */
	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		/* duplex changed: toggle the half-duplex enable bit */
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		/* speed changed: GBITEN set only for 1000 Mb/s */
		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack!  Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	/* link transition: gate the MAC rx/tx enables on link state */
	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			/* invalidate the cache so the next link-up
			 * reprograms speed and duplex */
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	/* log outside the spinlock */
	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
250*4882a593Smuzhiyun 
dnet_mii_probe(struct net_device * dev)251*4882a593Smuzhiyun static int dnet_mii_probe(struct net_device *dev)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun 	struct dnet *bp = netdev_priv(dev);
254*4882a593Smuzhiyun 	struct phy_device *phydev = NULL;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	/* find the first phy */
257*4882a593Smuzhiyun 	phydev = phy_find_first(bp->mii_bus);
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	if (!phydev) {
260*4882a593Smuzhiyun 		printk(KERN_ERR "%s: no PHY found\n", dev->name);
261*4882a593Smuzhiyun 		return -ENODEV;
262*4882a593Smuzhiyun 	}
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	/* TODO : add pin_irq */
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	/* attach the mac to the phy */
267*4882a593Smuzhiyun 	if (bp->capabilities & DNET_HAS_RMII) {
268*4882a593Smuzhiyun 		phydev = phy_connect(dev, phydev_name(phydev),
269*4882a593Smuzhiyun 				     &dnet_handle_link_change,
270*4882a593Smuzhiyun 				     PHY_INTERFACE_MODE_RMII);
271*4882a593Smuzhiyun 	} else {
272*4882a593Smuzhiyun 		phydev = phy_connect(dev, phydev_name(phydev),
273*4882a593Smuzhiyun 				     &dnet_handle_link_change,
274*4882a593Smuzhiyun 				     PHY_INTERFACE_MODE_MII);
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	if (IS_ERR(phydev)) {
278*4882a593Smuzhiyun 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
279*4882a593Smuzhiyun 		return PTR_ERR(phydev);
280*4882a593Smuzhiyun 	}
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	/* mask with MAC supported features */
283*4882a593Smuzhiyun 	if (bp->capabilities & DNET_HAS_GIGABIT)
284*4882a593Smuzhiyun 		phy_set_max_speed(phydev, SPEED_1000);
285*4882a593Smuzhiyun 	else
286*4882a593Smuzhiyun 		phy_set_max_speed(phydev, SPEED_100);
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	phy_support_asym_pause(phydev);
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 	bp->link = 0;
291*4882a593Smuzhiyun 	bp->speed = 0;
292*4882a593Smuzhiyun 	bp->duplex = -1;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	return 0;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun 
dnet_mii_init(struct dnet * bp)297*4882a593Smuzhiyun static int dnet_mii_init(struct dnet *bp)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun 	int err;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	bp->mii_bus = mdiobus_alloc();
302*4882a593Smuzhiyun 	if (bp->mii_bus == NULL)
303*4882a593Smuzhiyun 		return -ENOMEM;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	bp->mii_bus->name = "dnet_mii_bus";
306*4882a593Smuzhiyun 	bp->mii_bus->read = &dnet_mdio_read;
307*4882a593Smuzhiyun 	bp->mii_bus->write = &dnet_mdio_write;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
310*4882a593Smuzhiyun 		bp->pdev->name, bp->pdev->id);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	bp->mii_bus->priv = bp;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	if (mdiobus_register(bp->mii_bus)) {
315*4882a593Smuzhiyun 		err = -ENXIO;
316*4882a593Smuzhiyun 		goto err_out;
317*4882a593Smuzhiyun 	}
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	if (dnet_mii_probe(bp->dev) != 0) {
320*4882a593Smuzhiyun 		err = -ENXIO;
321*4882a593Smuzhiyun 		goto err_out_unregister_bus;
322*4882a593Smuzhiyun 	}
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	return 0;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun err_out_unregister_bus:
327*4882a593Smuzhiyun 	mdiobus_unregister(bp->mii_bus);
328*4882a593Smuzhiyun err_out:
329*4882a593Smuzhiyun 	mdiobus_free(bp->mii_bus);
330*4882a593Smuzhiyun 	return err;
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun /* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	/* 0x18 is a Marvell-specific LED control register; 0x4148 selects
	 * the LED scheme described above.  Returns phy_write()'s status
	 * (0 or a negative errno). */
	return phy_write(phydev, 0x18, 0x4148);
}
338*4882a593Smuzhiyun 
/*
 * Accumulate the hardware statistics counters into bp->hw_stats.
 *
 * Walks the RX counter registers from DNET_RX_PKT_IGNR_CNT through
 * DNET_RX_BYTE_CNT and the TX counters from DNET_TX_UNICAST_CNT through
 * DNET_TX_BYTE_CNT, adding each into the corresponding u32 field of
 * struct dnet_stats.  This relies on the struct fields being laid out
 * in exactly the same order as the registers; the WARN_ONs verify that
 * the two spans have matching lengths.
 */
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	/* struct span must match the register span (4 bytes per counter) */
	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	/* NOTE(review): counters are summed, which presumes the hardware
	 * clears them on read -- confirm against the MAC datasheet */
	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}
361*4882a593Smuzhiyun 
dnet_poll(struct napi_struct * napi,int budget)362*4882a593Smuzhiyun static int dnet_poll(struct napi_struct *napi, int budget)
363*4882a593Smuzhiyun {
364*4882a593Smuzhiyun 	struct dnet *bp = container_of(napi, struct dnet, napi);
365*4882a593Smuzhiyun 	struct net_device *dev = bp->dev;
366*4882a593Smuzhiyun 	int npackets = 0;
367*4882a593Smuzhiyun 	unsigned int pkt_len;
368*4882a593Smuzhiyun 	struct sk_buff *skb;
369*4882a593Smuzhiyun 	unsigned int *data_ptr;
370*4882a593Smuzhiyun 	u32 int_enable;
371*4882a593Smuzhiyun 	u32 cmd_word;
372*4882a593Smuzhiyun 	int i;
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	while (npackets < budget) {
375*4882a593Smuzhiyun 		/*
376*4882a593Smuzhiyun 		 * break out of while loop if there are no more
377*4882a593Smuzhiyun 		 * packets waiting
378*4882a593Smuzhiyun 		 */
379*4882a593Smuzhiyun 		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
380*4882a593Smuzhiyun 			break;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
383*4882a593Smuzhiyun 		pkt_len = cmd_word & 0xFFFF;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 		if (cmd_word & 0xDF180000)
386*4882a593Smuzhiyun 			printk(KERN_ERR "%s packet receive error %x\n",
387*4882a593Smuzhiyun 			       __func__, cmd_word);
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 		skb = netdev_alloc_skb(dev, pkt_len + 5);
390*4882a593Smuzhiyun 		if (skb != NULL) {
391*4882a593Smuzhiyun 			/* Align IP on 16 byte boundaries */
392*4882a593Smuzhiyun 			skb_reserve(skb, 2);
393*4882a593Smuzhiyun 			/*
394*4882a593Smuzhiyun 			 * 'skb_put()' points to the start of sk_buff
395*4882a593Smuzhiyun 			 * data area.
396*4882a593Smuzhiyun 			 */
397*4882a593Smuzhiyun 			data_ptr = skb_put(skb, pkt_len);
398*4882a593Smuzhiyun 			for (i = 0; i < (pkt_len + 3) >> 2; i++)
399*4882a593Smuzhiyun 				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
400*4882a593Smuzhiyun 			skb->protocol = eth_type_trans(skb, dev);
401*4882a593Smuzhiyun 			netif_receive_skb(skb);
402*4882a593Smuzhiyun 			npackets++;
403*4882a593Smuzhiyun 		} else
404*4882a593Smuzhiyun 			printk(KERN_NOTICE
405*4882a593Smuzhiyun 			       "%s: No memory to allocate a sk_buff of "
406*4882a593Smuzhiyun 			       "size %u.\n", dev->name, pkt_len);
407*4882a593Smuzhiyun 	}
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	if (npackets < budget) {
410*4882a593Smuzhiyun 		/* We processed all packets available.  Tell NAPI it can
411*4882a593Smuzhiyun 		 * stop polling then re-enable rx interrupts.
412*4882a593Smuzhiyun 		 */
413*4882a593Smuzhiyun 		napi_complete_done(napi, npackets);
414*4882a593Smuzhiyun 		int_enable = dnet_readl(bp, INTR_ENB);
415*4882a593Smuzhiyun 		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
416*4882a593Smuzhiyun 		dnet_writel(bp, int_enable, INTR_ENB);
417*4882a593Smuzhiyun 	}
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	return npackets;
420*4882a593Smuzhiyun }
421*4882a593Smuzhiyun 
/*
 * Top-half interrupt handler.
 *
 * INTR_SRC is clear-on-read; it is masked with the enabled sources and
 * each active condition is dispatched in turn:
 *  - TX FIFO almost-empty: disable that irq and wake the TX queue that
 *    dnet_start_xmit() stopped;
 *  - RX/TX FIFO full or discarded-frame errors: log and flush the
 *    offending FIFO (the only recovery available);
 *  - RX command FIFO almost-full: mask the RX irq and hand the work to
 *    NAPI (dnet_poll() re-enables it when done).
 */
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}
492*4882a593Smuzhiyun 
#ifdef DEBUG
/* Hex-dump an skb's payload to the kernel log.  Compiled out in normal
 * builds: DEBUG is explicitly #undef'd at the top of this file.
 * NOTE(review): PFX is not defined anywhere visible in this file --
 * confirm it comes from dnet.h before enabling DEBUG. */
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif
505*4882a593Smuzhiyun 
/*
 * Transmit one frame by programmed I/O: under bp->lock the packet is
 * copied 32 bits at a time into the TX data FIFO, then a command word
 * (start-byte alignment in bits [17:16], length in [15:0]) is pushed
 * to TX_LEN_FIFO to hand the frame to the MAC.
 *
 * NOTE(review): if the FIFO lacks room the frame is silently skipped
 * (no tx_dropped accounting, still returns NETDEV_TX_OK) -- confirm
 * this is intended.
 */
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{

	struct dnet *bp = netdev_priv(dev);
	unsigned int i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;
	u32 irq_enable;

	dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
	       skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	spin_lock_irqsave(&bp->lock, flags);

	dnet_readl(bp, TX_STATUS);

	/* round the copy down to the enclosing word boundary and up to a
	 * whole number of 32-bit words; the command word tells the MAC
	 * where inside the first word the frame actually starts */
	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	/* FIFO filling up: stop the queue and arm the almost-empty irq so
	 * dnet_interrupt() can wake it again */
	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer: the data was fully copied into the FIFO above */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
560*4882a593Smuzhiyun 
dnet_reset_hw(struct dnet * bp)561*4882a593Smuzhiyun static void dnet_reset_hw(struct dnet *bp)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun 	/* put ts_mac in IDLE state i.e. disable rx/tx */
564*4882a593Smuzhiyun 	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	/*
567*4882a593Smuzhiyun 	 * RX FIFO almost full threshold: only cmd FIFO almost full is
568*4882a593Smuzhiyun 	 * implemented for RX side
569*4882a593Smuzhiyun 	 */
570*4882a593Smuzhiyun 	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
571*4882a593Smuzhiyun 	/*
572*4882a593Smuzhiyun 	 * TX FIFO almost empty threshold: only data FIFO almost empty
573*4882a593Smuzhiyun 	 * is implemented for TX side
574*4882a593Smuzhiyun 	 */
575*4882a593Smuzhiyun 	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	/* flush rx/tx fifos */
578*4882a593Smuzhiyun 	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
579*4882a593Smuzhiyun 			SYS_CTL);
580*4882a593Smuzhiyun 	msleep(1);
581*4882a593Smuzhiyun 	dnet_writel(bp, 0, SYS_CTL);
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun 
/*
 * Bring the hardware to its operational configuration: reset it,
 * program the station address, set up the rx/tx control register from
 * the netdev flags, then clear and enable interrupts.  Called from
 * dnet_open(); rx/tx themselves are enabled later by
 * dnet_handle_link_change() once the PHY reports link-up.
 */
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	/* accept pause frames and broadcasts, drop control frames and
	 * frames with bad FCS */
	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them (INTR_SRC is clear-on-read) */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
			DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
			DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
			DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
			DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}
617*4882a593Smuzhiyun 
dnet_open(struct net_device * dev)618*4882a593Smuzhiyun static int dnet_open(struct net_device *dev)
619*4882a593Smuzhiyun {
620*4882a593Smuzhiyun 	struct dnet *bp = netdev_priv(dev);
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 	/* if the phy is not yet register, retry later */
623*4882a593Smuzhiyun 	if (!dev->phydev)
624*4882a593Smuzhiyun 		return -EAGAIN;
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 	napi_enable(&bp->napi);
627*4882a593Smuzhiyun 	dnet_init_hw(bp);
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 	phy_start_aneg(dev->phydev);
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	/* schedule a link state check */
632*4882a593Smuzhiyun 	phy_start(dev->phydev);
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	netif_start_queue(dev);
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 	return 0;
637*4882a593Smuzhiyun }
638*4882a593Smuzhiyun 
dnet_close(struct net_device * dev)639*4882a593Smuzhiyun static int dnet_close(struct net_device *dev)
640*4882a593Smuzhiyun {
641*4882a593Smuzhiyun 	struct dnet *bp = netdev_priv(dev);
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	netif_stop_queue(dev);
644*4882a593Smuzhiyun 	napi_disable(&bp->napi);
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 	if (dev->phydev)
647*4882a593Smuzhiyun 		phy_stop(dev->phydev);
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	dnet_reset_hw(bp);
650*4882a593Smuzhiyun 	netif_carrier_off(dev);
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	return 0;
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun 
/* Dump every accumulated hardware counter at pr_debug level; purely
 * diagnostic, no side effects on @hwstat. */
static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}
687*4882a593Smuzhiyun 
/* .ndo_get_stats: refresh the hardware counters and translate them into
 * the generic struct net_device_stats the networking core reports.
 * Returns a pointer to dev->stats, which it updates in place.
 */
static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{

	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IGP violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	/* TX packet count is the sum of the per-destination-class counters */
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	/* debug-only pretty dump of the raw hardware counters */
	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}
724*4882a593Smuzhiyun 
dnet_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)725*4882a593Smuzhiyun static void dnet_get_drvinfo(struct net_device *dev,
726*4882a593Smuzhiyun 			     struct ethtool_drvinfo *info)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
729*4882a593Smuzhiyun 	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun static const struct ethtool_ops dnet_ethtool_ops = {
733*4882a593Smuzhiyun 	.get_drvinfo		= dnet_get_drvinfo,
734*4882a593Smuzhiyun 	.get_link		= ethtool_op_get_link,
735*4882a593Smuzhiyun 	.get_ts_info		= ethtool_op_get_ts_info,
736*4882a593Smuzhiyun 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
737*4882a593Smuzhiyun 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
738*4882a593Smuzhiyun };
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun static const struct net_device_ops dnet_netdev_ops = {
741*4882a593Smuzhiyun 	.ndo_open		= dnet_open,
742*4882a593Smuzhiyun 	.ndo_stop		= dnet_close,
743*4882a593Smuzhiyun 	.ndo_get_stats		= dnet_get_stats,
744*4882a593Smuzhiyun 	.ndo_start_xmit		= dnet_start_xmit,
745*4882a593Smuzhiyun 	.ndo_do_ioctl		= phy_do_ioctl_running,
746*4882a593Smuzhiyun 	.ndo_set_mac_address	= eth_mac_addr,
747*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
748*4882a593Smuzhiyun };
749*4882a593Smuzhiyun 
dnet_probe(struct platform_device * pdev)750*4882a593Smuzhiyun static int dnet_probe(struct platform_device *pdev)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun 	struct resource *res;
753*4882a593Smuzhiyun 	struct net_device *dev;
754*4882a593Smuzhiyun 	struct dnet *bp;
755*4882a593Smuzhiyun 	struct phy_device *phydev;
756*4882a593Smuzhiyun 	int err;
757*4882a593Smuzhiyun 	unsigned int irq;
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	irq = platform_get_irq(pdev, 0);
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(*bp));
762*4882a593Smuzhiyun 	if (!dev)
763*4882a593Smuzhiyun 		return -ENOMEM;
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	/* TODO: Actually, we have some interesting features... */
766*4882a593Smuzhiyun 	dev->features |= 0;
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	bp = netdev_priv(dev);
769*4882a593Smuzhiyun 	bp->dev = dev;
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
772*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	spin_lock_init(&bp->lock);
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
777*4882a593Smuzhiyun 	if (IS_ERR(bp->regs)) {
778*4882a593Smuzhiyun 		err = PTR_ERR(bp->regs);
779*4882a593Smuzhiyun 		goto err_out_free_dev;
780*4882a593Smuzhiyun 	}
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	dev->irq = irq;
783*4882a593Smuzhiyun 	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
784*4882a593Smuzhiyun 	if (err) {
785*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
786*4882a593Smuzhiyun 		       irq, err);
787*4882a593Smuzhiyun 		goto err_out_free_dev;
788*4882a593Smuzhiyun 	}
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	dev->netdev_ops = &dnet_netdev_ops;
791*4882a593Smuzhiyun 	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
792*4882a593Smuzhiyun 	dev->ethtool_ops = &dnet_ethtool_ops;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	dev->base_addr = (unsigned long)bp->regs;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	dnet_get_hwaddr(bp);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	if (!is_valid_ether_addr(dev->dev_addr)) {
801*4882a593Smuzhiyun 		/* choose a random ethernet address */
802*4882a593Smuzhiyun 		eth_hw_addr_random(dev);
803*4882a593Smuzhiyun 		__dnet_set_hwaddr(bp);
804*4882a593Smuzhiyun 	}
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	err = register_netdev(dev);
807*4882a593Smuzhiyun 	if (err) {
808*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
809*4882a593Smuzhiyun 		goto err_out_free_irq;
810*4882a593Smuzhiyun 	}
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	/* register the PHY board fixup (for Marvell 88E1111) */
813*4882a593Smuzhiyun 	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
814*4882a593Smuzhiyun 					 dnet_phy_marvell_fixup);
815*4882a593Smuzhiyun 	/* we can live without it, so just issue a warning */
816*4882a593Smuzhiyun 	if (err)
817*4882a593Smuzhiyun 		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	err = dnet_mii_init(bp);
820*4882a593Smuzhiyun 	if (err)
821*4882a593Smuzhiyun 		goto err_out_unregister_netdev;
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
824*4882a593Smuzhiyun 	       bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
825*4882a593Smuzhiyun 	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
826*4882a593Smuzhiyun 	       (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
827*4882a593Smuzhiyun 	       (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
828*4882a593Smuzhiyun 	       (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
829*4882a593Smuzhiyun 	       (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
830*4882a593Smuzhiyun 	phydev = dev->phydev;
831*4882a593Smuzhiyun 	phy_attached_info(phydev);
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	return 0;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun err_out_unregister_netdev:
836*4882a593Smuzhiyun 	unregister_netdev(dev);
837*4882a593Smuzhiyun err_out_free_irq:
838*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
839*4882a593Smuzhiyun err_out_free_dev:
840*4882a593Smuzhiyun 	free_netdev(dev);
841*4882a593Smuzhiyun 	return err;
842*4882a593Smuzhiyun }
843*4882a593Smuzhiyun 
dnet_remove(struct platform_device * pdev)844*4882a593Smuzhiyun static int dnet_remove(struct platform_device *pdev)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	struct net_device *dev;
848*4882a593Smuzhiyun 	struct dnet *bp;
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	dev = platform_get_drvdata(pdev);
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	if (dev) {
853*4882a593Smuzhiyun 		bp = netdev_priv(dev);
854*4882a593Smuzhiyun 		if (dev->phydev)
855*4882a593Smuzhiyun 			phy_disconnect(dev->phydev);
856*4882a593Smuzhiyun 		mdiobus_unregister(bp->mii_bus);
857*4882a593Smuzhiyun 		mdiobus_free(bp->mii_bus);
858*4882a593Smuzhiyun 		unregister_netdev(dev);
859*4882a593Smuzhiyun 		free_irq(dev->irq, dev);
860*4882a593Smuzhiyun 		free_netdev(dev);
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	return 0;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun static struct platform_driver dnet_driver = {
867*4882a593Smuzhiyun 	.probe		= dnet_probe,
868*4882a593Smuzhiyun 	.remove		= dnet_remove,
869*4882a593Smuzhiyun 	.driver		= {
870*4882a593Smuzhiyun 		.name		= "dnet",
871*4882a593Smuzhiyun 	},
872*4882a593Smuzhiyun };
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun module_platform_driver(dnet_driver);
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun MODULE_LICENSE("GPL");
877*4882a593Smuzhiyun MODULE_DESCRIPTION("Dave DNET Ethernet driver");
878*4882a593Smuzhiyun MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
879*4882a593Smuzhiyun 	      "Matteo Vit <matteo.vit@dave.eu>");
880