xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/broadcom/bcm63xx_enet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
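
/* rx frames shorter than "copybreak" bytes are copied into a fresh skb so
 * the full-size rx buffer can be recycled in place; the threshold can be
 * tuned at load time, e.g. "modprobe bcm63xx_enet copybreak=256"
 * (assuming the driver is built as a module).
 */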

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
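
/* index 0 maps the global DMA controller registers (enet_dma_*), index 1
 * the per-channel configuration registers (enet_dmac_*) and index 2 the
 * per-channel state RAM (enet_dmas_*); see the accessors below.
 */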

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				       u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				       u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				       u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
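
/* The word written to ENET_MIIDATA_REG by the callbacks below follows the
 * IEEE 802.3 clause 22 MDIO frame layout: an opcode (read/write), a 5-bit
 * PHY address, a 5-bit register address, the 0b10 turnaround pattern and
 * 16 data bits, each placed at the shift defined by the ENET_MIIDATA_*
 * constants.
 */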

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;
			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
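		/* the barrier below orders the buffer address write
		 * before the ownership handover in len_stat, so the
		 * dma engine never sees a half-initialized descriptor */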
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan the ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status on
		 * each loop iteration */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
			(DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = napi_alloc_skb(&priv->napi, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
					 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure the other fields of the descriptor were not
		 * read before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
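/* NAPI sequence: ack the pending rx/tx dma interrupts, reclaim finished
 * tx buffers, drain up to "budget" rx packets, and only re-enable the dma
 * interrupt masks once the rings have been fully serviced. */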
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in the rx/tx queues, remove device from the
	 * poll queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
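/* rx/tx dma interrupts are masked here and re-enabled from
 * bcm_enet_poll() once the rings have been serviced (standard NAPI
 * pattern) */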
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should never
	 * happen since we stop the queue before it gets full */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev,
			"xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device up to the 64 byte
	 * ethernet minimum frame size */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might already be polling, so make sure we update the desc
	 * fields in the correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
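
/* e.g. for mac address 00:11:22:33:44:55 the code above writes
 * PML = 0x22334455 and PMH = 0x0011 | ENET_PMH_DATAVALID_MASK.
 */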

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers are left, the first one is
	 * used for our own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if the remote advertises it (trust phylib
	 * to have checked that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
			       phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun /*
1113*4882a593Smuzhiyun  * disable mac
1114*4882a593Smuzhiyun  */
bcm_enet_disable_mac(struct bcm_enet_priv * priv)1115*4882a593Smuzhiyun static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun 	int limit;
1118*4882a593Smuzhiyun 	u32 val;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	val = enet_readl(priv, ENET_CTL_REG);
1121*4882a593Smuzhiyun 	val |= ENET_CTL_DISABLE_MASK;
1122*4882a593Smuzhiyun 	enet_writel(priv, val, ENET_CTL_REG);
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	limit = 1000;
1125*4882a593Smuzhiyun 	do {
1126*4882a593Smuzhiyun 		u32 val;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 		val = enet_readl(priv, ENET_CTL_REG);
1129*4882a593Smuzhiyun 		if (!(val & ENET_CTL_DISABLE_MASK))
1130*4882a593Smuzhiyun 			break;
1131*4882a593Smuzhiyun 		udelay(1);
1132*4882a593Smuzhiyun 	} while (limit--);
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun /*
1136*4882a593Smuzhiyun  * disable dma in given channel
1137*4882a593Smuzhiyun  */
bcm_enet_disable_dma(struct bcm_enet_priv * priv,int chan)1138*4882a593Smuzhiyun static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	int limit;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	limit = 1000;
1145*4882a593Smuzhiyun 	do {
1146*4882a593Smuzhiyun 		u32 val;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1149*4882a593Smuzhiyun 		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1150*4882a593Smuzhiyun 			break;
1151*4882a593Smuzhiyun 		udelay(1);
1152*4882a593Smuzhiyun 	} while (limit--);
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun /*
1156*4882a593Smuzhiyun  * stop callback
1157*4882a593Smuzhiyun  */
bcm_enet_stop(struct net_device * dev)1158*4882a593Smuzhiyun static int bcm_enet_stop(struct net_device *dev)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1161*4882a593Smuzhiyun 	struct device *kdev;
1162*4882a593Smuzhiyun 	int i;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1165*4882a593Smuzhiyun 	kdev = &priv->pdev->dev;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	netif_stop_queue(dev);
1168*4882a593Smuzhiyun 	napi_disable(&priv->napi);
1169*4882a593Smuzhiyun 	if (priv->has_phy)
1170*4882a593Smuzhiyun 		phy_stop(dev->phydev);
1171*4882a593Smuzhiyun 	del_timer_sync(&priv->rx_timeout);
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	/* mask all interrupts */
1174*4882a593Smuzhiyun 	enet_writel(priv, 0, ENET_IRMASK_REG);
1175*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1176*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	/* make sure no mib update is scheduled */
1179*4882a593Smuzhiyun 	cancel_work_sync(&priv->mib_update_task);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	/* disable dma & mac */
1182*4882a593Smuzhiyun 	bcm_enet_disable_dma(priv, priv->tx_chan);
1183*4882a593Smuzhiyun 	bcm_enet_disable_dma(priv, priv->rx_chan);
1184*4882a593Smuzhiyun 	bcm_enet_disable_mac(priv);
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	/* force reclaim of all tx buffers */
1187*4882a593Smuzhiyun 	bcm_enet_tx_reclaim(dev, 1);
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	/* free the rx skb ring */
1190*4882a593Smuzhiyun 	for (i = 0; i < priv->rx_ring_size; i++) {
1191*4882a593Smuzhiyun 		struct bcm_enet_desc *desc;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 		if (!priv->rx_skb[i])
1194*4882a593Smuzhiyun 			continue;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 		desc = &priv->rx_desc_cpu[i];
1197*4882a593Smuzhiyun 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1198*4882a593Smuzhiyun 				 DMA_FROM_DEVICE);
1199*4882a593Smuzhiyun 		kfree_skb(priv->rx_skb[i]);
1200*4882a593Smuzhiyun 	}
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* free remaining allocated memory */
1203*4882a593Smuzhiyun 	kfree(priv->rx_skb);
1204*4882a593Smuzhiyun 	kfree(priv->tx_skb);
1205*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1206*4882a593Smuzhiyun 			  priv->rx_desc_cpu, priv->rx_desc_dma);
1207*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1208*4882a593Smuzhiyun 			  priv->tx_desc_cpu, priv->tx_desc_dma);
1209*4882a593Smuzhiyun 	free_irq(priv->irq_tx, dev);
1210*4882a593Smuzhiyun 	free_irq(priv->irq_rx, dev);
1211*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	/* release phy */
1214*4882a593Smuzhiyun 	if (priv->has_phy)
1215*4882a593Smuzhiyun 		phy_disconnect(dev->phydev);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	return 0;
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun /*
1221*4882a593Smuzhiyun  * ethtool callbacks
1222*4882a593Smuzhiyun  */
1223*4882a593Smuzhiyun struct bcm_enet_stats {
1224*4882a593Smuzhiyun 	char stat_string[ETH_GSTRING_LEN];
1225*4882a593Smuzhiyun 	int sizeof_stat;
1226*4882a593Smuzhiyun 	int stat_offset;
1227*4882a593Smuzhiyun 	int mib_reg;
1228*4882a593Smuzhiyun };
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
1231*4882a593Smuzhiyun 		     offsetof(struct bcm_enet_priv, m)
1232*4882a593Smuzhiyun #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
1233*4882a593Smuzhiyun 		     offsetof(struct net_device_stats, m)
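
/*
 * For illustration: GEN_STAT(mib.rx_gd_pkts) expands to the pair
 *	sizeof(((struct bcm_enet_priv *)0)->mib.rx_gd_pkts),
 *	offsetof(struct bcm_enet_priv, mib.rx_gd_pkts)
 * i.e. it fills the .sizeof_stat and .stat_offset members of
 * struct bcm_enet_stats in a single initializer.
 */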
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1236*4882a593Smuzhiyun 	{ "rx_packets", DEV_STAT(rx_packets), -1 },
1237*4882a593Smuzhiyun 	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
1238*4882a593Smuzhiyun 	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
1239*4882a593Smuzhiyun 	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
1240*4882a593Smuzhiyun 	{ "rx_errors", DEV_STAT(rx_errors), -1 },
1241*4882a593Smuzhiyun 	{ "tx_errors", DEV_STAT(tx_errors), -1 },
1242*4882a593Smuzhiyun 	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
1243*4882a593Smuzhiyun 	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1246*4882a593Smuzhiyun 	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1247*4882a593Smuzhiyun 	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1248*4882a593Smuzhiyun 	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1249*4882a593Smuzhiyun 	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1250*4882a593Smuzhiyun 	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1251*4882a593Smuzhiyun 	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1252*4882a593Smuzhiyun 	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1253*4882a593Smuzhiyun 	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1254*4882a593Smuzhiyun 	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1255*4882a593Smuzhiyun 	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1256*4882a593Smuzhiyun 	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1257*4882a593Smuzhiyun 	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1258*4882a593Smuzhiyun 	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1259*4882a593Smuzhiyun 	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1260*4882a593Smuzhiyun 	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1261*4882a593Smuzhiyun 	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1262*4882a593Smuzhiyun 	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1263*4882a593Smuzhiyun 	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1264*4882a593Smuzhiyun 	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1265*4882a593Smuzhiyun 	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1268*4882a593Smuzhiyun 	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1269*4882a593Smuzhiyun 	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1270*4882a593Smuzhiyun 	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1271*4882a593Smuzhiyun 	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1272*4882a593Smuzhiyun 	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1273*4882a593Smuzhiyun 	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1274*4882a593Smuzhiyun 	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1275*4882a593Smuzhiyun 	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1276*4882a593Smuzhiyun 	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1277*4882a593Smuzhiyun 	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1278*4882a593Smuzhiyun 	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1279*4882a593Smuzhiyun 	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1280*4882a593Smuzhiyun 	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1281*4882a593Smuzhiyun 	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1282*4882a593Smuzhiyun 	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1283*4882a593Smuzhiyun 	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1284*4882a593Smuzhiyun 	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1285*4882a593Smuzhiyun 	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1286*4882a593Smuzhiyun 	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1287*4882a593Smuzhiyun 	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1288*4882a593Smuzhiyun 	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1290*4882a593Smuzhiyun };
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun #define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun static const u32 unused_mib_regs[] = {
1295*4882a593Smuzhiyun 	ETH_MIB_TX_ALL_OCTETS,
1296*4882a593Smuzhiyun 	ETH_MIB_TX_ALL_PKTS,
1297*4882a593Smuzhiyun 	ETH_MIB_RX_ALL_OCTETS,
1298*4882a593Smuzhiyun 	ETH_MIB_RX_ALL_PKTS,
1299*4882a593Smuzhiyun };
1300*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun static void bcm_enet_get_drvinfo(struct net_device *netdev,
1303*4882a593Smuzhiyun 				 struct ethtool_drvinfo *drvinfo)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun 	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1306*4882a593Smuzhiyun 	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun static int bcm_enet_get_sset_count(struct net_device *netdev,
1310*4882a593Smuzhiyun 					int string_set)
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun 	switch (string_set) {
1313*4882a593Smuzhiyun 	case ETH_SS_STATS:
1314*4882a593Smuzhiyun 		return BCM_ENET_STATS_LEN;
1315*4882a593Smuzhiyun 	default:
1316*4882a593Smuzhiyun 		return -EINVAL;
1317*4882a593Smuzhiyun 	}
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun static void bcm_enet_get_strings(struct net_device *netdev,
1321*4882a593Smuzhiyun 				 u32 stringset, u8 *data)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun 	int i;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	switch (stringset) {
1326*4882a593Smuzhiyun 	case ETH_SS_STATS:
1327*4882a593Smuzhiyun 		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1328*4882a593Smuzhiyun 			memcpy(data + i * ETH_GSTRING_LEN,
1329*4882a593Smuzhiyun 			       bcm_enet_gstrings_stats[i].stat_string,
1330*4882a593Smuzhiyun 			       ETH_GSTRING_LEN);
1331*4882a593Smuzhiyun 		}
1332*4882a593Smuzhiyun 		break;
1333*4882a593Smuzhiyun 	}
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun static void update_mib_counters(struct bcm_enet_priv *priv)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun 	int i;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1341*4882a593Smuzhiyun 		const struct bcm_enet_stats *s;
1342*4882a593Smuzhiyun 		u32 val;
1343*4882a593Smuzhiyun 		char *p;
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 		s = &bcm_enet_gstrings_stats[i];
1346*4882a593Smuzhiyun 		if (s->mib_reg == -1)
1347*4882a593Smuzhiyun 			continue;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1350*4882a593Smuzhiyun 		p = (char *)priv + s->stat_offset;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 		if (s->sizeof_stat == sizeof(u64))
1353*4882a593Smuzhiyun 			*(u64 *)p += val;
1354*4882a593Smuzhiyun 		else
1355*4882a593Smuzhiyun 			*(u32 *)p += val;
1356*4882a593Smuzhiyun 	}
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	/* also empty unused mib counters to make sure mib counter
1359*4882a593Smuzhiyun 	 * overflow interrupt is cleared */
1360*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1361*4882a593Smuzhiyun 		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1362*4882a593Smuzhiyun }
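
/*
 * Note: bcm_enet_hw_preinit() sets ENET_MIBCTL_RDCLEAR_MASK, so each
 * MIB register read above returns the increment since the previous
 * read and the "+=" accumulation stays lossless across calls.
 */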
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1369*4882a593Smuzhiyun 	mutex_lock(&priv->mib_update_lock);
1370*4882a593Smuzhiyun 	update_mib_counters(priv);
1371*4882a593Smuzhiyun 	mutex_unlock(&priv->mib_update_lock);
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	/* reenable mib interrupt */
1374*4882a593Smuzhiyun 	if (netif_running(priv->net_dev))
1375*4882a593Smuzhiyun 		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1379*4882a593Smuzhiyun 				       struct ethtool_stats *stats,
1380*4882a593Smuzhiyun 				       u64 *data)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1383*4882a593Smuzhiyun 	int i;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	priv = netdev_priv(netdev);
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	mutex_lock(&priv->mib_update_lock);
1388*4882a593Smuzhiyun 	update_mib_counters(priv);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1391*4882a593Smuzhiyun 		const struct bcm_enet_stats *s;
1392*4882a593Smuzhiyun 		char *p;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 		s = &bcm_enet_gstrings_stats[i];
1395*4882a593Smuzhiyun 		if (s->mib_reg == -1)
1396*4882a593Smuzhiyun 			p = (char *)&netdev->stats;
1397*4882a593Smuzhiyun 		else
1398*4882a593Smuzhiyun 			p = (char *)priv;
1399*4882a593Smuzhiyun 		p += s->stat_offset;
1400*4882a593Smuzhiyun 		data[i] = (s->sizeof_stat == sizeof(u64)) ?
1401*4882a593Smuzhiyun 			*(u64 *)p : *(u32 *)p;
1402*4882a593Smuzhiyun 	}
1403*4882a593Smuzhiyun 	mutex_unlock(&priv->mib_update_lock);
1404*4882a593Smuzhiyun }
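
/*
 * User-space view (illustrative): the three callbacks above back
 * "ethtool -S <iface>", which pairs each stat_string with the value
 * returned in data[], e.g. a line such as "rx_good_pkts: 1234".
 */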
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun static int bcm_enet_nway_reset(struct net_device *dev)
1407*4882a593Smuzhiyun {
1408*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1411*4882a593Smuzhiyun 	if (priv->has_phy)
1412*4882a593Smuzhiyun 		return phy_ethtool_nway_reset(dev);
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun static int bcm_enet_get_link_ksettings(struct net_device *dev,
1418*4882a593Smuzhiyun 				       struct ethtool_link_ksettings *cmd)
1419*4882a593Smuzhiyun {
1420*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1421*4882a593Smuzhiyun 	u32 supported, advertising;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	if (priv->has_phy) {
1426*4882a593Smuzhiyun 		if (!dev->phydev)
1427*4882a593Smuzhiyun 			return -ENODEV;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 		phy_ethtool_ksettings_get(dev->phydev, cmd);
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 		return 0;
1432*4882a593Smuzhiyun 	} else {
1433*4882a593Smuzhiyun 		cmd->base.autoneg = 0;
1434*4882a593Smuzhiyun 		cmd->base.speed = (priv->force_speed_100) ?
1435*4882a593Smuzhiyun 			SPEED_100 : SPEED_10;
1436*4882a593Smuzhiyun 		cmd->base.duplex = (priv->force_duplex_full) ?
1437*4882a593Smuzhiyun 			DUPLEX_FULL : DUPLEX_HALF;
1438*4882a593Smuzhiyun 		supported = ADVERTISED_10baseT_Half |
1439*4882a593Smuzhiyun 			ADVERTISED_10baseT_Full |
1440*4882a593Smuzhiyun 			ADVERTISED_100baseT_Half |
1441*4882a593Smuzhiyun 			ADVERTISED_100baseT_Full;
1442*4882a593Smuzhiyun 		advertising = 0;
1443*4882a593Smuzhiyun 		ethtool_convert_legacy_u32_to_link_mode(
1444*4882a593Smuzhiyun 			cmd->link_modes.supported, supported);
1445*4882a593Smuzhiyun 		ethtool_convert_legacy_u32_to_link_mode(
1446*4882a593Smuzhiyun 			cmd->link_modes.advertising, advertising);
1447*4882a593Smuzhiyun 		cmd->base.port = PORT_MII;
1448*4882a593Smuzhiyun 	}
1449*4882a593Smuzhiyun 	return 0;
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun static int bcm_enet_set_link_ksettings(struct net_device *dev,
1453*4882a593Smuzhiyun 				       const struct ethtool_link_ksettings *cmd)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1458*4882a593Smuzhiyun 	if (priv->has_phy) {
1459*4882a593Smuzhiyun 		if (!dev->phydev)
1460*4882a593Smuzhiyun 			return -ENODEV;
1461*4882a593Smuzhiyun 		return phy_ethtool_ksettings_set(dev->phydev, cmd);
1462*4882a593Smuzhiyun 	} else {
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 		if (cmd->base.autoneg ||
1465*4882a593Smuzhiyun 		    (cmd->base.speed != SPEED_100 &&
1466*4882a593Smuzhiyun 		     cmd->base.speed != SPEED_10) ||
1467*4882a593Smuzhiyun 		    cmd->base.port != PORT_MII)
1468*4882a593Smuzhiyun 			return -EINVAL;
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 		priv->force_speed_100 =
1471*4882a593Smuzhiyun 			(cmd->base.speed == SPEED_100) ? 1 : 0;
1472*4882a593Smuzhiyun 		priv->force_duplex_full =
1473*4882a593Smuzhiyun 			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 		if (netif_running(dev))
1476*4882a593Smuzhiyun 			bcm_enet_adjust_link(dev);
1477*4882a593Smuzhiyun 		return 0;
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun }
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun static void bcm_enet_get_ringparam(struct net_device *dev,
1482*4882a593Smuzhiyun 				   struct ethtool_ringparam *ering)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	/* rx/tx ring is actually only limited by memory */
1489*4882a593Smuzhiyun 	ering->rx_max_pending = 8192;
1490*4882a593Smuzhiyun 	ering->tx_max_pending = 8192;
1491*4882a593Smuzhiyun 	ering->rx_pending = priv->rx_ring_size;
1492*4882a593Smuzhiyun 	ering->tx_pending = priv->tx_ring_size;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun static int bcm_enet_set_ringparam(struct net_device *dev,
1496*4882a593Smuzhiyun 				  struct ethtool_ringparam *ering)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1499*4882a593Smuzhiyun 	int was_running;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 	was_running = 0;
1504*4882a593Smuzhiyun 	if (netif_running(dev)) {
1505*4882a593Smuzhiyun 		bcm_enet_stop(dev);
1506*4882a593Smuzhiyun 		was_running = 1;
1507*4882a593Smuzhiyun 	}
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	priv->rx_ring_size = ering->rx_pending;
1510*4882a593Smuzhiyun 	priv->tx_ring_size = ering->tx_pending;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	if (was_running) {
1513*4882a593Smuzhiyun 		int err;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		err = bcm_enet_open(dev);
1516*4882a593Smuzhiyun 		if (err)
1517*4882a593Smuzhiyun 			dev_close(dev);
1518*4882a593Smuzhiyun 		else
1519*4882a593Smuzhiyun 			bcm_enet_set_multicast_list(dev);
1520*4882a593Smuzhiyun 	}
1521*4882a593Smuzhiyun 	return 0;
1522*4882a593Smuzhiyun }
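
/*
 * Usage sketch: a request like "ethtool -G eth0 rx 512 tx 256" ends up
 * here; the ethtool core checks it against the maxima reported by
 * bcm_enet_get_ringparam() first, and the interface is briefly stopped
 * and reopened so the rings can be reallocated at the new size.
 */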
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun static void bcm_enet_get_pauseparam(struct net_device *dev,
1525*4882a593Smuzhiyun 				    struct ethtool_pauseparam *ecmd)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1530*4882a593Smuzhiyun 	ecmd->autoneg = priv->pause_auto;
1531*4882a593Smuzhiyun 	ecmd->rx_pause = priv->pause_rx;
1532*4882a593Smuzhiyun 	ecmd->tx_pause = priv->pause_tx;
1533*4882a593Smuzhiyun }
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun static int bcm_enet_set_pauseparam(struct net_device *dev,
1536*4882a593Smuzhiyun 				   struct ethtool_pauseparam *ecmd)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	if (priv->has_phy) {
1543*4882a593Smuzhiyun 		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1544*4882a593Smuzhiyun 			/* asymmetric pause mode not supported,
1545*4882a593Smuzhiyun 			 * actually possible but integrated PHY has RO
1546*4882a593Smuzhiyun 			 * asym_pause bit */
1547*4882a593Smuzhiyun 			return -EINVAL;
1548*4882a593Smuzhiyun 		}
1549*4882a593Smuzhiyun 	} else {
1550*4882a593Smuzhiyun 		/* no pause autoneg on direct mii connection */
1551*4882a593Smuzhiyun 		if (ecmd->autoneg)
1552*4882a593Smuzhiyun 			return -EINVAL;
1553*4882a593Smuzhiyun 	}
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	priv->pause_auto = ecmd->autoneg;
1556*4882a593Smuzhiyun 	priv->pause_rx = ecmd->rx_pause;
1557*4882a593Smuzhiyun 	priv->pause_tx = ecmd->tx_pause;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	return 0;
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun static const struct ethtool_ops bcm_enet_ethtool_ops = {
1563*4882a593Smuzhiyun 	.get_strings		= bcm_enet_get_strings,
1564*4882a593Smuzhiyun 	.get_sset_count		= bcm_enet_get_sset_count,
1565*4882a593Smuzhiyun 	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
1566*4882a593Smuzhiyun 	.nway_reset		= bcm_enet_nway_reset,
1567*4882a593Smuzhiyun 	.get_drvinfo		= bcm_enet_get_drvinfo,
1568*4882a593Smuzhiyun 	.get_link		= ethtool_op_get_link,
1569*4882a593Smuzhiyun 	.get_ringparam		= bcm_enet_get_ringparam,
1570*4882a593Smuzhiyun 	.set_ringparam		= bcm_enet_set_ringparam,
1571*4882a593Smuzhiyun 	.get_pauseparam		= bcm_enet_get_pauseparam,
1572*4882a593Smuzhiyun 	.set_pauseparam		= bcm_enet_set_pauseparam,
1573*4882a593Smuzhiyun 	.get_link_ksettings	= bcm_enet_get_link_ksettings,
1574*4882a593Smuzhiyun 	.set_link_ksettings	= bcm_enet_set_link_ksettings,
1575*4882a593Smuzhiyun };
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1578*4882a593Smuzhiyun {
1579*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1582*4882a593Smuzhiyun 	if (priv->has_phy) {
1583*4882a593Smuzhiyun 		if (!dev->phydev)
1584*4882a593Smuzhiyun 			return -ENODEV;
1585*4882a593Smuzhiyun 		return phy_mii_ioctl(dev->phydev, rq, cmd);
1586*4882a593Smuzhiyun 	} else {
1587*4882a593Smuzhiyun 		struct mii_if_info mii;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 		mii.dev = dev;
1590*4882a593Smuzhiyun 		mii.mdio_read = bcm_enet_mdio_read_mii;
1591*4882a593Smuzhiyun 		mii.mdio_write = bcm_enet_mdio_write_mii;
1592*4882a593Smuzhiyun 		mii.phy_id = 0;
1593*4882a593Smuzhiyun 		mii.phy_id_mask = 0x3f;
1594*4882a593Smuzhiyun 		mii.reg_num_mask = 0x1f;
1595*4882a593Smuzhiyun 		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1596*4882a593Smuzhiyun 	}
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun /*
1600*4882a593Smuzhiyun  * adjust mtu, can't be called while device is running
1601*4882a593Smuzhiyun  */
1602*4882a593Smuzhiyun static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1603*4882a593Smuzhiyun {
1604*4882a593Smuzhiyun 	struct bcm_enet_priv *priv = netdev_priv(dev);
1605*4882a593Smuzhiyun 	int actual_mtu = new_mtu;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	if (netif_running(dev))
1608*4882a593Smuzhiyun 		return -EBUSY;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	/* add ethernet header + vlan tag size */
1611*4882a593Smuzhiyun 	actual_mtu += VLAN_ETH_HLEN;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	/*
1614*4882a593Smuzhiyun 	 * setup maximum size before we get overflow mark in
1615*4882a593Smuzhiyun 	 * descriptor, note that this will not prevent reception of
1616*4882a593Smuzhiyun 	 * big frames, they will be split into multiple buffers
1617*4882a593Smuzhiyun 	 * anyway
1618*4882a593Smuzhiyun 	 */
1619*4882a593Smuzhiyun 	priv->hw_mtu = actual_mtu;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	/*
1622*4882a593Smuzhiyun 	 * align rx buffer size to dma burst len, account FCS since
1623*4882a593Smuzhiyun 	 * it's appended
1624*4882a593Smuzhiyun 	 */
1625*4882a593Smuzhiyun 	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1626*4882a593Smuzhiyun 				  priv->dma_maxburst * 4);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	dev->mtu = new_mtu;
1629*4882a593Smuzhiyun 	return 0;
1630*4882a593Smuzhiyun }
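
/*
 * Worked example (assuming dma_maxburst is 16, the non-switch default):
 * for new_mtu = 1500, hw_mtu = 1500 + VLAN_ETH_HLEN (18) = 1518 and
 * rx_skb_size = ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) = 1536.
 */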
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun /*
1633*4882a593Smuzhiyun  * preinit hardware to allow mii operation while device is down
1634*4882a593Smuzhiyun  */
1635*4882a593Smuzhiyun static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1636*4882a593Smuzhiyun {
1637*4882a593Smuzhiyun 	u32 val;
1638*4882a593Smuzhiyun 	int limit;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	/* make sure mac is disabled */
1641*4882a593Smuzhiyun 	bcm_enet_disable_mac(priv);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	/* soft reset mac */
1644*4882a593Smuzhiyun 	val = ENET_CTL_SRESET_MASK;
1645*4882a593Smuzhiyun 	enet_writel(priv, val, ENET_CTL_REG);
1646*4882a593Smuzhiyun 	wmb();
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	limit = 1000;
1649*4882a593Smuzhiyun 	do {
1650*4882a593Smuzhiyun 		val = enet_readl(priv, ENET_CTL_REG);
1651*4882a593Smuzhiyun 		if (!(val & ENET_CTL_SRESET_MASK))
1652*4882a593Smuzhiyun 			break;
1653*4882a593Smuzhiyun 		udelay(1);
1654*4882a593Smuzhiyun 	} while (limit--);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	/* select correct mii interface */
1657*4882a593Smuzhiyun 	val = enet_readl(priv, ENET_CTL_REG);
1658*4882a593Smuzhiyun 	if (priv->use_external_mii)
1659*4882a593Smuzhiyun 		val |= ENET_CTL_EPHYSEL_MASK;
1660*4882a593Smuzhiyun 	else
1661*4882a593Smuzhiyun 		val &= ~ENET_CTL_EPHYSEL_MASK;
1662*4882a593Smuzhiyun 	enet_writel(priv, val, ENET_CTL_REG);
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	/* turn on mdc clock */
1665*4882a593Smuzhiyun 	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1666*4882a593Smuzhiyun 		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	/* set mib counters to self-clear when read */
1669*4882a593Smuzhiyun 	val = enet_readl(priv, ENET_MIBCTL_REG);
1670*4882a593Smuzhiyun 	val |= ENET_MIBCTL_RDCLEAR_MASK;
1671*4882a593Smuzhiyun 	enet_writel(priv, val, ENET_MIBCTL_REG);
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun static const struct net_device_ops bcm_enet_ops = {
1675*4882a593Smuzhiyun 	.ndo_open		= bcm_enet_open,
1676*4882a593Smuzhiyun 	.ndo_stop		= bcm_enet_stop,
1677*4882a593Smuzhiyun 	.ndo_start_xmit		= bcm_enet_start_xmit,
1678*4882a593Smuzhiyun 	.ndo_set_mac_address	= bcm_enet_set_mac_address,
1679*4882a593Smuzhiyun 	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
1680*4882a593Smuzhiyun 	.ndo_do_ioctl		= bcm_enet_ioctl,
1681*4882a593Smuzhiyun 	.ndo_change_mtu		= bcm_enet_change_mtu,
1682*4882a593Smuzhiyun };
1683*4882a593Smuzhiyun 
1684*4882a593Smuzhiyun /*
1685*4882a593Smuzhiyun  * allocate netdevice, request register memory and register device.
1686*4882a593Smuzhiyun  */
1687*4882a593Smuzhiyun static int bcm_enet_probe(struct platform_device *pdev)
1688*4882a593Smuzhiyun {
1689*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1690*4882a593Smuzhiyun 	struct net_device *dev;
1691*4882a593Smuzhiyun 	struct bcm63xx_enet_platform_data *pd;
1692*4882a593Smuzhiyun 	struct resource *res_irq, *res_irq_rx, *res_irq_tx;
1693*4882a593Smuzhiyun 	struct mii_bus *bus;
1694*4882a593Smuzhiyun 	int i, ret;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	if (!bcm_enet_shared_base[0])
1697*4882a593Smuzhiyun 		return -EPROBE_DEFER;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1700*4882a593Smuzhiyun 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1701*4882a593Smuzhiyun 	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1702*4882a593Smuzhiyun 	if (!res_irq || !res_irq_rx || !res_irq_tx)
1703*4882a593Smuzhiyun 		return -ENODEV;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(*priv));
1706*4882a593Smuzhiyun 	if (!dev)
1707*4882a593Smuzhiyun 		return -ENOMEM;
1708*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	priv->enet_is_sw = false;
1711*4882a593Smuzhiyun 	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	ret = bcm_enet_change_mtu(dev, dev->mtu);
1714*4882a593Smuzhiyun 	if (ret)
1715*4882a593Smuzhiyun 		goto out;
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	priv->base = devm_platform_ioremap_resource(pdev, 0);
1718*4882a593Smuzhiyun 	if (IS_ERR(priv->base)) {
1719*4882a593Smuzhiyun 		ret = PTR_ERR(priv->base);
1720*4882a593Smuzhiyun 		goto out;
1721*4882a593Smuzhiyun 	}
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 	dev->irq = priv->irq = res_irq->start;
1724*4882a593Smuzhiyun 	priv->irq_rx = res_irq_rx->start;
1725*4882a593Smuzhiyun 	priv->irq_tx = res_irq_tx->start;
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1728*4882a593Smuzhiyun 	if (IS_ERR(priv->mac_clk)) {
1729*4882a593Smuzhiyun 		ret = PTR_ERR(priv->mac_clk);
1730*4882a593Smuzhiyun 		goto out;
1731*4882a593Smuzhiyun 	}
1732*4882a593Smuzhiyun 	ret = clk_prepare_enable(priv->mac_clk);
1733*4882a593Smuzhiyun 	if (ret)
1734*4882a593Smuzhiyun 		goto out;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	/* initialize default and fetch platform data */
1737*4882a593Smuzhiyun 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1738*4882a593Smuzhiyun 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	pd = dev_get_platdata(&pdev->dev);
1741*4882a593Smuzhiyun 	if (pd) {
1742*4882a593Smuzhiyun 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1743*4882a593Smuzhiyun 		priv->has_phy = pd->has_phy;
1744*4882a593Smuzhiyun 		priv->phy_id = pd->phy_id;
1745*4882a593Smuzhiyun 		priv->has_phy_interrupt = pd->has_phy_interrupt;
1746*4882a593Smuzhiyun 		priv->phy_interrupt = pd->phy_interrupt;
1747*4882a593Smuzhiyun 		priv->use_external_mii = !pd->use_internal_phy;
1748*4882a593Smuzhiyun 		priv->pause_auto = pd->pause_auto;
1749*4882a593Smuzhiyun 		priv->pause_rx = pd->pause_rx;
1750*4882a593Smuzhiyun 		priv->pause_tx = pd->pause_tx;
1751*4882a593Smuzhiyun 		priv->force_duplex_full = pd->force_duplex_full;
1752*4882a593Smuzhiyun 		priv->force_speed_100 = pd->force_speed_100;
1753*4882a593Smuzhiyun 		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1754*4882a593Smuzhiyun 		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1755*4882a593Smuzhiyun 		priv->dma_chan_width = pd->dma_chan_width;
1756*4882a593Smuzhiyun 		priv->dma_has_sram = pd->dma_has_sram;
1757*4882a593Smuzhiyun 		priv->dma_desc_shift = pd->dma_desc_shift;
1758*4882a593Smuzhiyun 		priv->rx_chan = pd->rx_chan;
1759*4882a593Smuzhiyun 		priv->tx_chan = pd->tx_chan;
1760*4882a593Smuzhiyun 	}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	if (priv->has_phy && !priv->use_external_mii) {
1763*4882a593Smuzhiyun 		/* using internal PHY, enable clock */
1764*4882a593Smuzhiyun 		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1765*4882a593Smuzhiyun 		if (IS_ERR(priv->phy_clk)) {
1766*4882a593Smuzhiyun 			ret = PTR_ERR(priv->phy_clk);
1767*4882a593Smuzhiyun 			priv->phy_clk = NULL;
1768*4882a593Smuzhiyun 			goto out_disable_clk_mac;
1769*4882a593Smuzhiyun 		}
1770*4882a593Smuzhiyun 		ret = clk_prepare_enable(priv->phy_clk);
1771*4882a593Smuzhiyun 		if (ret)
1772*4882a593Smuzhiyun 			goto out_disable_clk_mac;
1773*4882a593Smuzhiyun 	}
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	/* do minimal hardware init to be able to probe mii bus */
1776*4882a593Smuzhiyun 	bcm_enet_hw_preinit(priv);
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	/* MII bus registration */
1779*4882a593Smuzhiyun 	if (priv->has_phy) {
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 		priv->mii_bus = mdiobus_alloc();
1782*4882a593Smuzhiyun 		if (!priv->mii_bus) {
1783*4882a593Smuzhiyun 			ret = -ENOMEM;
1784*4882a593Smuzhiyun 			goto out_uninit_hw;
1785*4882a593Smuzhiyun 		}
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 		bus = priv->mii_bus;
1788*4882a593Smuzhiyun 		bus->name = "bcm63xx_enet MII bus";
1789*4882a593Smuzhiyun 		bus->parent = &pdev->dev;
1790*4882a593Smuzhiyun 		bus->priv = priv;
1791*4882a593Smuzhiyun 		bus->read = bcm_enet_mdio_read_phylib;
1792*4882a593Smuzhiyun 		bus->write = bcm_enet_mdio_write_phylib;
1793*4882a593Smuzhiyun 		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 		/* only probe bus where we think the PHY is, because
1796*4882a593Smuzhiyun 		 * the mdio read operation returns 0 instead of 0xffff
1797*4882a593Smuzhiyun 		 * if a slave is not present on hw */
1798*4882a593Smuzhiyun 		bus->phy_mask = ~(1 << priv->phy_id);
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		if (priv->has_phy_interrupt)
1801*4882a593Smuzhiyun 			bus->irq[priv->phy_id] = priv->phy_interrupt;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 		ret = mdiobus_register(bus);
1804*4882a593Smuzhiyun 		if (ret) {
1805*4882a593Smuzhiyun 			dev_err(&pdev->dev, "unable to register mdio bus\n");
1806*4882a593Smuzhiyun 			goto out_free_mdio;
1807*4882a593Smuzhiyun 		}
1808*4882a593Smuzhiyun 	} else {
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 		/* run platform code to initialize PHY device */
1811*4882a593Smuzhiyun 		if (pd && pd->mii_config &&
1812*4882a593Smuzhiyun 		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1813*4882a593Smuzhiyun 				   bcm_enet_mdio_write_mii)) {
1814*4882a593Smuzhiyun 			dev_err(&pdev->dev, "unable to configure mdio bus\n");
1815*4882a593Smuzhiyun 			goto out_uninit_hw;
1816*4882a593Smuzhiyun 		}
1817*4882a593Smuzhiyun 	}
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	spin_lock_init(&priv->rx_lock);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	/* init rx timeout (used for oom) */
1822*4882a593Smuzhiyun 	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	/* init the mib update lock&work */
1825*4882a593Smuzhiyun 	mutex_init(&priv->mib_update_lock);
1826*4882a593Smuzhiyun 	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	/* zero mib counters */
1829*4882a593Smuzhiyun 	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1830*4882a593Smuzhiyun 		enet_writel(priv, 0, ENET_MIB_REG(i));
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	/* register netdevice */
1833*4882a593Smuzhiyun 	dev->netdev_ops = &bcm_enet_ops;
1834*4882a593Smuzhiyun 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	dev->ethtool_ops = &bcm_enet_ethtool_ops;
1837*4882a593Smuzhiyun 	/* MTU range: 46 - 2028 */
1838*4882a593Smuzhiyun 	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1839*4882a593Smuzhiyun 	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1840*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 	ret = register_netdev(dev);
1843*4882a593Smuzhiyun 	if (ret)
1844*4882a593Smuzhiyun 		goto out_unregister_mdio;
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	netif_carrier_off(dev);
1847*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
1848*4882a593Smuzhiyun 	priv->pdev = pdev;
1849*4882a593Smuzhiyun 	priv->net_dev = dev;
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	return 0;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun out_unregister_mdio:
1854*4882a593Smuzhiyun 	if (priv->mii_bus)
1855*4882a593Smuzhiyun 		mdiobus_unregister(priv->mii_bus);
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun out_free_mdio:
1858*4882a593Smuzhiyun 	if (priv->mii_bus)
1859*4882a593Smuzhiyun 		mdiobus_free(priv->mii_bus);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun out_uninit_hw:
1862*4882a593Smuzhiyun 	/* turn off mdc clock */
1863*4882a593Smuzhiyun 	enet_writel(priv, 0, ENET_MIISC_REG);
1864*4882a593Smuzhiyun 	clk_disable_unprepare(priv->phy_clk);
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun out_disable_clk_mac:
1867*4882a593Smuzhiyun 	clk_disable_unprepare(priv->mac_clk);
1868*4882a593Smuzhiyun out:
1869*4882a593Smuzhiyun 	free_netdev(dev);
1870*4882a593Smuzhiyun 	return ret;
1871*4882a593Smuzhiyun }
1872*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun /*
1875*4882a593Smuzhiyun  * exit func, stops hardware and unregisters netdevice
1876*4882a593Smuzhiyun  */
1877*4882a593Smuzhiyun static int bcm_enet_remove(struct platform_device *pdev)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
1880*4882a593Smuzhiyun 	struct net_device *dev;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	/* stop netdevice */
1883*4882a593Smuzhiyun 	dev = platform_get_drvdata(pdev);
1884*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1885*4882a593Smuzhiyun 	unregister_netdev(dev);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	/* turn off mdc clock */
1888*4882a593Smuzhiyun 	enet_writel(priv, 0, ENET_MIISC_REG);
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	if (priv->has_phy) {
1891*4882a593Smuzhiyun 		mdiobus_unregister(priv->mii_bus);
1892*4882a593Smuzhiyun 		mdiobus_free(priv->mii_bus);
1893*4882a593Smuzhiyun 	} else {
1894*4882a593Smuzhiyun 		struct bcm63xx_enet_platform_data *pd;
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 		pd = dev_get_platdata(&pdev->dev);
1897*4882a593Smuzhiyun 		if (pd && pd->mii_config)
1898*4882a593Smuzhiyun 			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1899*4882a593Smuzhiyun 				       bcm_enet_mdio_write_mii);
1900*4882a593Smuzhiyun 	}
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	/* disable hw block clocks */
1903*4882a593Smuzhiyun 	clk_disable_unprepare(priv->phy_clk);
1904*4882a593Smuzhiyun 	clk_disable_unprepare(priv->mac_clk);
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun 	free_netdev(dev);
1907*4882a593Smuzhiyun 	return 0;
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun struct platform_driver bcm63xx_enet_driver = {
1911*4882a593Smuzhiyun 	.probe	= bcm_enet_probe,
1912*4882a593Smuzhiyun 	.remove	= bcm_enet_remove,
1913*4882a593Smuzhiyun 	.driver	= {
1914*4882a593Smuzhiyun 		.name	= "bcm63xx_enet",
1915*4882a593Smuzhiyun 		.owner  = THIS_MODULE,
1916*4882a593Smuzhiyun 	},
1917*4882a593Smuzhiyun };
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun /*
1920*4882a593Smuzhiyun  * switch mii access callbacks
1921*4882a593Smuzhiyun  */
1922*4882a593Smuzhiyun static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1923*4882a593Smuzhiyun 				int ext, int phy_id, int location)
1924*4882a593Smuzhiyun {
1925*4882a593Smuzhiyun 	u32 reg;
1926*4882a593Smuzhiyun 	int ret;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	spin_lock_bh(&priv->enetsw_mdio_lock);
1929*4882a593Smuzhiyun 	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	reg = ENETSW_MDIOC_RD_MASK |
1932*4882a593Smuzhiyun 		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1933*4882a593Smuzhiyun 		(location << ENETSW_MDIOC_REG_SHIFT);
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	if (ext)
1936*4882a593Smuzhiyun 		reg |= ENETSW_MDIOC_EXT_MASK;
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1939*4882a593Smuzhiyun 	udelay(50);
1940*4882a593Smuzhiyun 	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1941*4882a593Smuzhiyun 	spin_unlock_bh(&priv->enetsw_mdio_lock);
1942*4882a593Smuzhiyun 	return ret;
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1946*4882a593Smuzhiyun 				 int ext, int phy_id, int location,
1947*4882a593Smuzhiyun 				 uint16_t data)
1948*4882a593Smuzhiyun {
1949*4882a593Smuzhiyun 	u32 reg;
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	spin_lock_bh(&priv->enetsw_mdio_lock);
1952*4882a593Smuzhiyun 	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	reg = ENETSW_MDIOC_WR_MASK |
1955*4882a593Smuzhiyun 		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1956*4882a593Smuzhiyun 		(location << ENETSW_MDIOC_REG_SHIFT);
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	if (ext)
1959*4882a593Smuzhiyun 		reg |= ENETSW_MDIOC_EXT_MASK;
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	reg |= data;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1964*4882a593Smuzhiyun 	udelay(50);
1965*4882a593Smuzhiyun 	spin_unlock_bh(&priv->enetsw_mdio_lock);
1966*4882a593Smuzhiyun }
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun static inline int bcm_enet_port_is_rgmii(int portid)
1969*4882a593Smuzhiyun {
1970*4882a593Smuzhiyun 	return portid >= ENETSW_RGMII_PORT0;
1971*4882a593Smuzhiyun }
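
/*
 * Usage sketch (mirroring what the polling timer below does): read the
 * link status of the PHY behind switch port i, going through the
 * external MDIO lines when the port is RGMII:
 *
 *	val = bcmenet_sw_mdio_read(priv, bcm_enet_port_is_rgmii(i),
 *				   port->phy_id, MII_BMSR);
 */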
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun /*
1974*4882a593Smuzhiyun  * enet sw PHY polling
1975*4882a593Smuzhiyun  */
1976*4882a593Smuzhiyun static void swphy_poll_timer(struct timer_list *t)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun 	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
1979*4882a593Smuzhiyun 	unsigned int i;
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 	for (i = 0; i < priv->num_ports; i++) {
1982*4882a593Smuzhiyun 		struct bcm63xx_enetsw_port *port;
1983*4882a593Smuzhiyun 		int val, j, up, advertise, lpa, speed, duplex, media;
1984*4882a593Smuzhiyun 		int external_phy = bcm_enet_port_is_rgmii(i);
1985*4882a593Smuzhiyun 		u8 override;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		port = &priv->used_ports[i];
1988*4882a593Smuzhiyun 		if (!port->used)
1989*4882a593Smuzhiyun 			continue;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 		if (port->bypass_link)
1992*4882a593Smuzhiyun 			continue;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 		/* dummy read to clear */
1995*4882a593Smuzhiyun 		for (j = 0; j < 2; j++)
1996*4882a593Smuzhiyun 			val = bcmenet_sw_mdio_read(priv, external_phy,
1997*4882a593Smuzhiyun 						   port->phy_id, MII_BMSR);
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 		if (val == 0xffff)
2000*4882a593Smuzhiyun 			continue;
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 		up = (val & BMSR_LSTATUS) ? 1 : 0;
2003*4882a593Smuzhiyun 		if (!(up ^ priv->sw_port_link[i]))
2004*4882a593Smuzhiyun 			continue;
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 		priv->sw_port_link[i] = up;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 		/* link changed */
2009*4882a593Smuzhiyun 		if (!up) {
2010*4882a593Smuzhiyun 			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2011*4882a593Smuzhiyun 				 port->name);
2012*4882a593Smuzhiyun 			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2013*4882a593Smuzhiyun 				      ENETSW_PORTOV_REG(i));
2014*4882a593Smuzhiyun 			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2015*4882a593Smuzhiyun 				      ENETSW_PTCTRL_TXDIS_MASK,
2016*4882a593Smuzhiyun 				      ENETSW_PTCTRL_REG(i));
2017*4882a593Smuzhiyun 			continue;
2018*4882a593Smuzhiyun 		}
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 		advertise = bcmenet_sw_mdio_read(priv, external_phy,
2021*4882a593Smuzhiyun 						 port->phy_id, MII_ADVERTISE);
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2024*4882a593Smuzhiyun 					   MII_LPA);
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 		/* figure out media and duplex from advertise and LPA values */
2027*4882a593Smuzhiyun 		media = mii_nway_result(lpa & advertise);
2028*4882a593Smuzhiyun 		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2031*4882a593Smuzhiyun 			speed = 100;
2032*4882a593Smuzhiyun 		else
2033*4882a593Smuzhiyun 			speed = 10;
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 		if (val & BMSR_ESTATEN) {
2036*4882a593Smuzhiyun 			advertise = bcmenet_sw_mdio_read(priv, external_phy,
2037*4882a593Smuzhiyun 						port->phy_id, MII_CTRL1000);
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 			lpa = bcmenet_sw_mdio_read(priv, external_phy,
2040*4882a593Smuzhiyun 						port->phy_id, MII_STAT1000);
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
2043*4882a593Smuzhiyun 					&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
2044*4882a593Smuzhiyun 				speed = 1000;
2045*4882a593Smuzhiyun 				duplex = (lpa & LPA_1000FULL);
2046*4882a593Smuzhiyun 			}
2047*4882a593Smuzhiyun 		}
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 		dev_info(&priv->pdev->dev,
2050*4882a593Smuzhiyun 			 "link UP on %s, %dMbps, %s-duplex\n",
2051*4882a593Smuzhiyun 			 port->name, speed, duplex ? "full" : "half");
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 		override = ENETSW_PORTOV_ENABLE_MASK |
2054*4882a593Smuzhiyun 			ENETSW_PORTOV_LINKUP_MASK;
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 		if (speed == 1000)
2057*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_1000_MASK;
2058*4882a593Smuzhiyun 		else if (speed == 100)
2059*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_100_MASK;
2060*4882a593Smuzhiyun 		if (duplex)
2061*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_FDX_MASK;
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2064*4882a593Smuzhiyun 		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2065*4882a593Smuzhiyun 	}
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 	priv->swphy_poll.expires = jiffies + HZ;
2068*4882a593Smuzhiyun 	add_timer(&priv->swphy_poll);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun /*
2072*4882a593Smuzhiyun  * open callback, allocate dma rings & buffers and start rx operation
2073*4882a593Smuzhiyun  */
2074*4882a593Smuzhiyun static int bcm_enetsw_open(struct net_device *dev)
2075*4882a593Smuzhiyun {
2076*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2077*4882a593Smuzhiyun 	struct device *kdev;
2078*4882a593Smuzhiyun 	int i, ret;
2079*4882a593Smuzhiyun 	unsigned int size;
2080*4882a593Smuzhiyun 	void *p;
2081*4882a593Smuzhiyun 	u32 val;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2084*4882a593Smuzhiyun 	kdev = &priv->pdev->dev;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	/* mask all interrupts and request them */
2087*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2088*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2091*4882a593Smuzhiyun 			  0, dev->name, dev);
2092*4882a593Smuzhiyun 	if (ret)
2093*4882a593Smuzhiyun 		goto out_freeirq;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	if (priv->irq_tx != -1) {
2096*4882a593Smuzhiyun 		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2097*4882a593Smuzhiyun 				  0, dev->name, dev);
2098*4882a593Smuzhiyun 		if (ret)
2099*4882a593Smuzhiyun 			goto out_freeirq_rx;
2100*4882a593Smuzhiyun 	}
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	/* allocate rx dma ring */
2103*4882a593Smuzhiyun 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2104*4882a593Smuzhiyun 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2105*4882a593Smuzhiyun 	if (!p) {
2106*4882a593Smuzhiyun 		dev_err(kdev, "cannot allocate rx ring %u\n", size);
2107*4882a593Smuzhiyun 		ret = -ENOMEM;
2108*4882a593Smuzhiyun 		goto out_freeirq_tx;
2109*4882a593Smuzhiyun 	}
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	priv->rx_desc_alloc_size = size;
2112*4882a593Smuzhiyun 	priv->rx_desc_cpu = p;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	/* allocate tx dma ring */
2115*4882a593Smuzhiyun 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2116*4882a593Smuzhiyun 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2117*4882a593Smuzhiyun 	if (!p) {
2118*4882a593Smuzhiyun 		dev_err(kdev, "cannot allocate tx ring\n");
2119*4882a593Smuzhiyun 		ret = -ENOMEM;
2120*4882a593Smuzhiyun 		goto out_free_rx_ring;
2121*4882a593Smuzhiyun 	}
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	priv->tx_desc_alloc_size = size;
2124*4882a593Smuzhiyun 	priv->tx_desc_cpu = p;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2127*4882a593Smuzhiyun 			       GFP_KERNEL);
2128*4882a593Smuzhiyun 	if (!priv->tx_skb) {
2129*4882a593Smuzhiyun 		dev_err(kdev, "cannot allocate tx skb queue\n");
2130*4882a593Smuzhiyun 		ret = -ENOMEM;
2131*4882a593Smuzhiyun 		goto out_free_tx_ring;
2132*4882a593Smuzhiyun 	}
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	priv->tx_desc_count = priv->tx_ring_size;
2135*4882a593Smuzhiyun 	priv->tx_dirty_desc = 0;
2136*4882a593Smuzhiyun 	priv->tx_curr_desc = 0;
2137*4882a593Smuzhiyun 	spin_lock_init(&priv->tx_lock);
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	/* init & fill rx ring with skbs */
2140*4882a593Smuzhiyun 	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
2141*4882a593Smuzhiyun 			       GFP_KERNEL);
2142*4882a593Smuzhiyun 	if (!priv->rx_skb) {
2143*4882a593Smuzhiyun 		dev_err(kdev, "cannot allocate rx skb queue\n");
2144*4882a593Smuzhiyun 		ret = -ENOMEM;
2145*4882a593Smuzhiyun 		goto out_free_tx_skb;
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	priv->rx_desc_count = 0;
2149*4882a593Smuzhiyun 	priv->rx_dirty_desc = 0;
2150*4882a593Smuzhiyun 	priv->rx_curr_desc = 0;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	/* disable all ports */
2153*4882a593Smuzhiyun 	for (i = 0; i < priv->num_ports; i++) {
2154*4882a593Smuzhiyun 		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2155*4882a593Smuzhiyun 			      ENETSW_PORTOV_REG(i));
2156*4882a593Smuzhiyun 		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2157*4882a593Smuzhiyun 			      ENETSW_PTCTRL_TXDIS_MASK,
2158*4882a593Smuzhiyun 			      ENETSW_PTCTRL_REG(i));
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 		priv->sw_port_link[i] = 0;
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	/* reset mib */
2164*4882a593Smuzhiyun 	val = enetsw_readb(priv, ENETSW_GMCR_REG);
2165*4882a593Smuzhiyun 	val |= ENETSW_GMCR_RST_MIB_MASK;
2166*4882a593Smuzhiyun 	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2167*4882a593Smuzhiyun 	mdelay(1);
2168*4882a593Smuzhiyun 	val &= ~ENETSW_GMCR_RST_MIB_MASK;
2169*4882a593Smuzhiyun 	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2170*4882a593Smuzhiyun 	mdelay(1);
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	/* force CPU port state */
2173*4882a593Smuzhiyun 	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2174*4882a593Smuzhiyun 	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2175*4882a593Smuzhiyun 	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	/* enable switch forward engine */
2178*4882a593Smuzhiyun 	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2179*4882a593Smuzhiyun 	val |= ENETSW_SWMODE_FWD_EN_MASK;
2180*4882a593Smuzhiyun 	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	/* enable jumbo on all ports */
2183*4882a593Smuzhiyun 	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2184*4882a593Smuzhiyun 	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	/* initialize flow control buffer allocation */
2187*4882a593Smuzhiyun 	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2188*4882a593Smuzhiyun 			ENETDMA_BUFALLOC_REG(priv->rx_chan));
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	if (bcm_enet_refill_rx(dev)) {
2191*4882a593Smuzhiyun 		dev_err(kdev, "cannot allocate rx skb queue\n");
2192*4882a593Smuzhiyun 		ret = -ENOMEM;
2193*4882a593Smuzhiyun 		goto out;
2194*4882a593Smuzhiyun 	}
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun 	/* write rx & tx ring addresses */
2197*4882a593Smuzhiyun 	enet_dmas_writel(priv, priv->rx_desc_dma,
2198*4882a593Smuzhiyun 			 ENETDMAS_RSTART_REG, priv->rx_chan);
2199*4882a593Smuzhiyun 	enet_dmas_writel(priv, priv->tx_desc_dma,
2200*4882a593Smuzhiyun 			 ENETDMAS_RSTART_REG, priv->tx_chan);
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	/* clear remaining state ram for rx & tx channel */
2203*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2204*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2205*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2206*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2207*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2208*4882a593Smuzhiyun 	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	/* set dma maximum burst len */
2211*4882a593Smuzhiyun 	enet_dmac_writel(priv, priv->dma_maxburst,
2212*4882a593Smuzhiyun 			 ENETDMAC_MAXBURST, priv->rx_chan);
2213*4882a593Smuzhiyun 	enet_dmac_writel(priv, priv->dma_maxburst,
2214*4882a593Smuzhiyun 			 ENETDMAC_MAXBURST, priv->tx_chan);
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	/* set flow control low/high threshold to 1/3 / 2/3 */
2217*4882a593Smuzhiyun 	val = priv->rx_ring_size / 3;
2218*4882a593Smuzhiyun 	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2219*4882a593Smuzhiyun 	val = (priv->rx_ring_size * 2) / 3;
2220*4882a593Smuzhiyun 	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
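	/* e.g. a 64-entry rx ring yields watermarks of 21 (low) and
	 * 42 (high) descriptors, by integer division */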
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	/* all set, enable mac and interrupts, start dma engine and
2223*4882a593Smuzhiyun 	 * kick rx dma channel
2224*4882a593Smuzhiyun 	 */
2225*4882a593Smuzhiyun 	wmb();
2226*4882a593Smuzhiyun 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2227*4882a593Smuzhiyun 	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2228*4882a593Smuzhiyun 			 ENETDMAC_CHANCFG, priv->rx_chan);
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 	/* watch "packet transferred" interrupt in rx and tx */
2231*4882a593Smuzhiyun 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2232*4882a593Smuzhiyun 			 ENETDMAC_IR, priv->rx_chan);
2233*4882a593Smuzhiyun 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2234*4882a593Smuzhiyun 			 ENETDMAC_IR, priv->tx_chan);
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	/* make sure we enable napi before rx interrupt  */
2237*4882a593Smuzhiyun 	napi_enable(&priv->napi);
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2240*4882a593Smuzhiyun 			 ENETDMAC_IRMASK, priv->rx_chan);
2241*4882a593Smuzhiyun 	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2242*4882a593Smuzhiyun 			 ENETDMAC_IRMASK, priv->tx_chan);
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	netif_carrier_on(dev);
2245*4882a593Smuzhiyun 	netif_start_queue(dev);
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	/* apply override config for bypass_link ports here. */
2248*4882a593Smuzhiyun 	for (i = 0; i < priv->num_ports; i++) {
2249*4882a593Smuzhiyun 		struct bcm63xx_enetsw_port *port;
2250*4882a593Smuzhiyun 		u8 override;
2251*4882a593Smuzhiyun 		port = &priv->used_ports[i];
2252*4882a593Smuzhiyun 		if (!port->used)
2253*4882a593Smuzhiyun 			continue;
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 		if (!port->bypass_link)
2256*4882a593Smuzhiyun 			continue;
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun 		override = ENETSW_PORTOV_ENABLE_MASK |
2259*4882a593Smuzhiyun 			ENETSW_PORTOV_LINKUP_MASK;
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 		switch (port->force_speed) {
2262*4882a593Smuzhiyun 		case 1000:
2263*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_1000_MASK;
2264*4882a593Smuzhiyun 			break;
2265*4882a593Smuzhiyun 		case 100:
2266*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_100_MASK;
2267*4882a593Smuzhiyun 			break;
2268*4882a593Smuzhiyun 		case 10:
2269*4882a593Smuzhiyun 			break;
2270*4882a593Smuzhiyun 		default:
2271*4882a593Smuzhiyun 			pr_warn("invalid forced speed on port %s: assume 10\n",
2272*4882a593Smuzhiyun 			       port->name);
2273*4882a593Smuzhiyun 			break;
2274*4882a593Smuzhiyun 		}
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 		if (port->force_duplex_full)
2277*4882a593Smuzhiyun 			override |= ENETSW_IMPOV_FDX_MASK;
2278*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2281*4882a593Smuzhiyun 		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2282*4882a593Smuzhiyun 	}
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	/* start phy polling timer */
2285*4882a593Smuzhiyun 	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2286*4882a593Smuzhiyun 	mod_timer(&priv->swphy_poll, jiffies);
2287*4882a593Smuzhiyun 	return 0;
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun out:
2290*4882a593Smuzhiyun 	for (i = 0; i < priv->rx_ring_size; i++) {
2291*4882a593Smuzhiyun 		struct bcm_enet_desc *desc;
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 		if (!priv->rx_skb[i])
2294*4882a593Smuzhiyun 			continue;
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun 		desc = &priv->rx_desc_cpu[i];
2297*4882a593Smuzhiyun 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2298*4882a593Smuzhiyun 				 DMA_FROM_DEVICE);
2299*4882a593Smuzhiyun 		kfree_skb(priv->rx_skb[i]);
2300*4882a593Smuzhiyun 	}
2301*4882a593Smuzhiyun 	kfree(priv->rx_skb);
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun out_free_tx_skb:
2304*4882a593Smuzhiyun 	kfree(priv->tx_skb);
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun out_free_tx_ring:
2307*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2308*4882a593Smuzhiyun 			  priv->tx_desc_cpu, priv->tx_desc_dma);
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun out_free_rx_ring:
2311*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2312*4882a593Smuzhiyun 			  priv->rx_desc_cpu, priv->rx_desc_dma);
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun out_freeirq_tx:
2315*4882a593Smuzhiyun 	if (priv->irq_tx != -1)
2316*4882a593Smuzhiyun 		free_irq(priv->irq_tx, dev);
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun out_freeirq_rx:
2319*4882a593Smuzhiyun 	free_irq(priv->irq_rx, dev);
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun out_freeirq:
2322*4882a593Smuzhiyun 	return ret;
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun /* stop callback */
2326*4882a593Smuzhiyun static int bcm_enetsw_stop(struct net_device *dev)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2329*4882a593Smuzhiyun 	struct device *kdev;
2330*4882a593Smuzhiyun 	int i;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2333*4882a593Smuzhiyun 	kdev = &priv->pdev->dev;
2334*4882a593Smuzhiyun 
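	/* quiesce software activity (phy poll timer, tx queue, napi and
	 * the oom refill timer) before masking irqs and halting dma, so
	 * nothing touches the rings while they are torn down.
	 */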
2335*4882a593Smuzhiyun 	del_timer_sync(&priv->swphy_poll);
2336*4882a593Smuzhiyun 	netif_stop_queue(dev);
2337*4882a593Smuzhiyun 	napi_disable(&priv->napi);
2338*4882a593Smuzhiyun 	del_timer_sync(&priv->rx_timeout);
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	/* mask all interrupts */
2341*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2342*4882a593Smuzhiyun 	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	/* disable dma & mac */
2345*4882a593Smuzhiyun 	bcm_enet_disable_dma(priv, priv->tx_chan);
2346*4882a593Smuzhiyun 	bcm_enet_disable_dma(priv, priv->rx_chan);
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	/* force reclaim of all tx buffers */
2349*4882a593Smuzhiyun 	bcm_enet_tx_reclaim(dev, 1);
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	/* free the rx skb ring */
2352*4882a593Smuzhiyun 	for (i = 0; i < priv->rx_ring_size; i++) {
2353*4882a593Smuzhiyun 		struct bcm_enet_desc *desc;
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 		if (!priv->rx_skb[i])
2356*4882a593Smuzhiyun 			continue;
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 		desc = &priv->rx_desc_cpu[i];
2359*4882a593Smuzhiyun 		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2360*4882a593Smuzhiyun 				 DMA_FROM_DEVICE);
2361*4882a593Smuzhiyun 		kfree_skb(priv->rx_skb[i]);
2362*4882a593Smuzhiyun 	}
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	/* free remaining allocated memory */
2365*4882a593Smuzhiyun 	kfree(priv->rx_skb);
2366*4882a593Smuzhiyun 	kfree(priv->tx_skb);
2367*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2368*4882a593Smuzhiyun 			  priv->rx_desc_cpu, priv->rx_desc_dma);
2369*4882a593Smuzhiyun 	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2370*4882a593Smuzhiyun 			  priv->tx_desc_cpu, priv->tx_desc_dma);
2371*4882a593Smuzhiyun 	if (priv->irq_tx != -1)
2372*4882a593Smuzhiyun 		free_irq(priv->irq_tx, dev);
2373*4882a593Smuzhiyun 	free_irq(priv->irq_rx, dev);
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 	return 0;
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun /* try to sort out phy external status by walking the used_port field
2379*4882a593Smuzhiyun  * in the bcm_enet_priv structure. in case the phy address is not
2380*4882a593Smuzhiyun  * assigned to any physical port on the switch, assume it is external
2381*4882a593Smuzhiyun  * (and yell at the user).
2382*4882a593Smuzhiyun  */
2383*4882a593Smuzhiyun static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2384*4882a593Smuzhiyun {
2385*4882a593Smuzhiyun 	int i;
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	for (i = 0; i < priv->num_ports; ++i) {
2388*4882a593Smuzhiyun 		if (!priv->used_ports[i].used)
2389*4882a593Smuzhiyun 			continue;
2390*4882a593Smuzhiyun 		if (priv->used_ports[i].phy_id == phy_id)
2391*4882a593Smuzhiyun 			return bcm_enet_port_is_rgmii(i);
2392*4882a593Smuzhiyun 	}
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2395*4882a593Smuzhiyun 		    phy_id);
2396*4882a593Smuzhiyun 	return 1;
2397*4882a593Smuzhiyun }
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun /* can't use bcmenet_sw_mdio_read directly as we need to sort out
2400*4882a593Smuzhiyun  * external/internal status of the given phy_id first.
2401*4882a593Smuzhiyun  */
2402*4882a593Smuzhiyun static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2403*4882a593Smuzhiyun 				    int location)
2404*4882a593Smuzhiyun {
2405*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2408*4882a593Smuzhiyun 	return bcmenet_sw_mdio_read(priv,
2409*4882a593Smuzhiyun 				    bcm_enetsw_phy_is_external(priv, phy_id),
2410*4882a593Smuzhiyun 				    phy_id, location);
2411*4882a593Smuzhiyun }
2412*4882a593Smuzhiyun 
2413*4882a593Smuzhiyun /* can't use bcmenet_sw_mdio_write directly as we need to sort out
2414*4882a593Smuzhiyun  * external/internal status of the given phy_id first.
2415*4882a593Smuzhiyun  */
2416*4882a593Smuzhiyun static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2417*4882a593Smuzhiyun 				      int location,
2418*4882a593Smuzhiyun 				      int val)
2419*4882a593Smuzhiyun {
2420*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2423*4882a593Smuzhiyun 	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2424*4882a593Smuzhiyun 			      phy_id, location, val);
2425*4882a593Smuzhiyun }
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428*4882a593Smuzhiyun {
2429*4882a593Smuzhiyun 	struct mii_if_info mii;
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	mii.dev = dev;
2432*4882a593Smuzhiyun 	mii.mdio_read = bcm_enetsw_mii_mdio_read;
2433*4882a593Smuzhiyun 	mii.mdio_write = bcm_enetsw_mii_mdio_write;
2434*4882a593Smuzhiyun 	mii.phy_id = 0;
2435*4882a593Smuzhiyun 	mii.phy_id_mask = 0x3f;
2436*4882a593Smuzhiyun 	mii.reg_num_mask = 0x1f;
2437*4882a593Smuzhiyun 	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun }
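/* note: a mii_if_info is built on the stack for each call because the
 * switch has no single phy bound to the netdev; generic_mii_ioctl()
 * then services SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG through the two
 * mdio wrappers above.
 */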
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun static const struct net_device_ops bcm_enetsw_ops = {
2442*4882a593Smuzhiyun 	.ndo_open		= bcm_enetsw_open,
2443*4882a593Smuzhiyun 	.ndo_stop		= bcm_enetsw_stop,
2444*4882a593Smuzhiyun 	.ndo_start_xmit		= bcm_enet_start_xmit,
2445*4882a593Smuzhiyun 	.ndo_change_mtu		= bcm_enet_change_mtu,
2446*4882a593Smuzhiyun 	.ndo_do_ioctl		= bcm_enetsw_ioctl,
2447*4882a593Smuzhiyun };
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2451*4882a593Smuzhiyun 	{ "rx_packets", DEV_STAT(rx_packets), -1 },
2452*4882a593Smuzhiyun 	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
2453*4882a593Smuzhiyun 	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
2454*4882a593Smuzhiyun 	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
2455*4882a593Smuzhiyun 	{ "rx_errors", DEV_STAT(rx_errors), -1 },
2456*4882a593Smuzhiyun 	{ "tx_errors", DEV_STAT(tx_errors), -1 },
2457*4882a593Smuzhiyun 	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
2458*4882a593Smuzhiyun 	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2461*4882a593Smuzhiyun 	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2462*4882a593Smuzhiyun 	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2463*4882a593Smuzhiyun 	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2464*4882a593Smuzhiyun 	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2465*4882a593Smuzhiyun 	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2466*4882a593Smuzhiyun 	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2467*4882a593Smuzhiyun 	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2468*4882a593Smuzhiyun 	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2469*4882a593Smuzhiyun 	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2470*4882a593Smuzhiyun 	  ETHSW_MIB_RX_1024_1522 },
2471*4882a593Smuzhiyun 	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2472*4882a593Smuzhiyun 	  ETHSW_MIB_RX_1523_2047 },
2473*4882a593Smuzhiyun 	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2474*4882a593Smuzhiyun 	  ETHSW_MIB_RX_2048_4095 },
2475*4882a593Smuzhiyun 	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2476*4882a593Smuzhiyun 	  ETHSW_MIB_RX_4096_8191 },
2477*4882a593Smuzhiyun 	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2478*4882a593Smuzhiyun 	  ETHSW_MIB_RX_8192_9728 },
2479*4882a593Smuzhiyun 	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2480*4882a593Smuzhiyun 	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2481*4882a593Smuzhiyun 	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2482*4882a593Smuzhiyun 	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2483*4882a593Smuzhiyun 	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2486*4882a593Smuzhiyun 	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2487*4882a593Smuzhiyun 	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2488*4882a593Smuzhiyun 	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2489*4882a593Smuzhiyun 	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2490*4882a593Smuzhiyun 	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun };
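/* note: the inverted tx->RX / rx->TX register mapping above is
 * presumably intentional: the ETHSW MIB counters appear to be named
 * from the switch fabric's point of view, so host "tx" traffic lands
 * in the switch RX_* counters and vice versa.
 */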
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun #define BCM_ENETSW_STATS_LEN	\
2495*4882a593Smuzhiyun 	ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun static void bcm_enetsw_get_strings(struct net_device *netdev,
2498*4882a593Smuzhiyun 				   u32 stringset, u8 *data)
2499*4882a593Smuzhiyun {
2500*4882a593Smuzhiyun 	int i;
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	switch (stringset) {
2503*4882a593Smuzhiyun 	case ETH_SS_STATS:
2504*4882a593Smuzhiyun 		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2505*4882a593Smuzhiyun 			memcpy(data + i * ETH_GSTRING_LEN,
2506*4882a593Smuzhiyun 			       bcm_enetsw_gstrings_stats[i].stat_string,
2507*4882a593Smuzhiyun 			       ETH_GSTRING_LEN);
2508*4882a593Smuzhiyun 		}
2509*4882a593Smuzhiyun 		break;
2510*4882a593Smuzhiyun 	}
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2514*4882a593Smuzhiyun 				     int string_set)
2515*4882a593Smuzhiyun {
2516*4882a593Smuzhiyun 	switch (string_set) {
2517*4882a593Smuzhiyun 	case ETH_SS_STATS:
2518*4882a593Smuzhiyun 		return BCM_ENETSW_STATS_LEN;
2519*4882a593Smuzhiyun 	default:
2520*4882a593Smuzhiyun 		return -EINVAL;
2521*4882a593Smuzhiyun 	}
2522*4882a593Smuzhiyun }
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2525*4882a593Smuzhiyun 				   struct ethtool_drvinfo *drvinfo)
2526*4882a593Smuzhiyun {
2527*4882a593Smuzhiyun 	strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2528*4882a593Smuzhiyun 	strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2529*4882a593Smuzhiyun }
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2532*4882a593Smuzhiyun 					 struct ethtool_stats *stats,
2533*4882a593Smuzhiyun 					 u64 *data)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2536*4882a593Smuzhiyun 	int i;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	priv = netdev_priv(netdev);
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2541*4882a593Smuzhiyun 		const struct bcm_enet_stats *s;
2542*4882a593Smuzhiyun 		u32 lo, hi;
2543*4882a593Smuzhiyun 		char *p;
2544*4882a593Smuzhiyun 		int reg;
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 		s = &bcm_enetsw_gstrings_stats[i];
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 		reg = s->mib_reg;
2549*4882a593Smuzhiyun 		if (reg == -1)
2550*4882a593Smuzhiyun 			continue;
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun 		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2553*4882a593Smuzhiyun 		p = (char *)priv + s->stat_offset;
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun 		if (s->sizeof_stat == sizeof(u64)) {
2556*4882a593Smuzhiyun 			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2557*4882a593Smuzhiyun 			*(u64 *)p = ((u64)hi << 32 | lo);
2558*4882a593Smuzhiyun 		} else {
2559*4882a593Smuzhiyun 			*(u32 *)p = lo;
2560*4882a593Smuzhiyun 		}
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2564*4882a593Smuzhiyun 		const struct bcm_enet_stats *s;
2565*4882a593Smuzhiyun 		char *p;
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 		s = &bcm_enetsw_gstrings_stats[i];
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 		if (s->mib_reg == -1)
2570*4882a593Smuzhiyun 			p = (char *)&netdev->stats + s->stat_offset;
2571*4882a593Smuzhiyun 		else
2572*4882a593Smuzhiyun 			p = (char *)priv + s->stat_offset;
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun 		data[i] = (s->sizeof_stat == sizeof(u64)) ?
2575*4882a593Smuzhiyun 			*(u64 *)p : *(u32 *)p;
2576*4882a593Smuzhiyun 	}
2577*4882a593Smuzhiyun }
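/* note: the first loop above snapshots the hardware MIB counters into
 * priv (64-bit counters are read as a lo/hi register pair), and the
 * second loop copies either the generic netdev stats (mib_reg == -1)
 * or the freshly mirrored MIB values into the ethtool data array;
 * this is the path exercised by "ethtool -S".
 */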
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun static void bcm_enetsw_get_ringparam(struct net_device *dev,
2580*4882a593Smuzhiyun 				     struct ethtool_ringparam *ering)
2581*4882a593Smuzhiyun {
2582*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 	/* rx/tx ring is actually only limited by memory */
2587*4882a593Smuzhiyun 	ering->rx_max_pending = 8192;
2588*4882a593Smuzhiyun 	ering->tx_max_pending = 8192;
2589*4882a593Smuzhiyun 	ering->rx_mini_max_pending = 0;
2590*4882a593Smuzhiyun 	ering->rx_jumbo_max_pending = 0;
2591*4882a593Smuzhiyun 	ering->rx_pending = priv->rx_ring_size;
2592*4882a593Smuzhiyun 	ering->tx_pending = priv->tx_ring_size;
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun static int bcm_enetsw_set_ringparam(struct net_device *dev,
2596*4882a593Smuzhiyun 				    struct ethtool_ringparam *ering)
2597*4882a593Smuzhiyun {
2598*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2599*4882a593Smuzhiyun 	int was_running;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 	was_running = 0;
2604*4882a593Smuzhiyun 	if (netif_running(dev)) {
2605*4882a593Smuzhiyun 		bcm_enetsw_stop(dev);
2606*4882a593Smuzhiyun 		was_running = 1;
2607*4882a593Smuzhiyun 	}
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	priv->rx_ring_size = ering->rx_pending;
2610*4882a593Smuzhiyun 	priv->tx_ring_size = ering->tx_pending;
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	if (was_running) {
2613*4882a593Smuzhiyun 		int err;
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun 		err = bcm_enetsw_open(dev);
2616*4882a593Smuzhiyun 		if (err)
2617*4882a593Smuzhiyun 			dev_close(dev);
2618*4882a593Smuzhiyun 	}
2619*4882a593Smuzhiyun 	return 0;
2620*4882a593Smuzhiyun }
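/* note: new ring sizes only take effect through the stop/open cycle
 * above; the requested values are not clamped here, presumably because
 * the ethtool core has already validated them against the
 * get_ringparam() maxima.
 */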
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2623*4882a593Smuzhiyun 	.get_strings		= bcm_enetsw_get_strings,
2624*4882a593Smuzhiyun 	.get_sset_count		= bcm_enetsw_get_sset_count,
2625*4882a593Smuzhiyun 	.get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2626*4882a593Smuzhiyun 	.get_drvinfo		= bcm_enetsw_get_drvinfo,
2627*4882a593Smuzhiyun 	.get_ringparam		= bcm_enetsw_get_ringparam,
2628*4882a593Smuzhiyun 	.set_ringparam		= bcm_enetsw_set_ringparam,
2629*4882a593Smuzhiyun };
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun /* allocate netdevice, request register memory and register device. */
2632*4882a593Smuzhiyun static int bcm_enetsw_probe(struct platform_device *pdev)
2633*4882a593Smuzhiyun {
2634*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2635*4882a593Smuzhiyun 	struct net_device *dev;
2636*4882a593Smuzhiyun 	struct bcm63xx_enetsw_platform_data *pd;
2637*4882a593Smuzhiyun 	struct resource *res_mem;
2638*4882a593Smuzhiyun 	int ret, irq_rx, irq_tx;
2639*4882a593Smuzhiyun 
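	/* the shared dma register windows are mapped by
	 * bcm63xx_enet_shared_driver; until that device has probed,
	 * defer so the driver core retries us later.
	 */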
2640*4882a593Smuzhiyun 	if (!bcm_enet_shared_base[0])
2641*4882a593Smuzhiyun 		return -EPROBE_DEFER;
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2644*4882a593Smuzhiyun 	irq_rx = platform_get_irq(pdev, 0);
2645*4882a593Smuzhiyun 	irq_tx = platform_get_irq(pdev, 1);
2646*4882a593Smuzhiyun 	if (!res_mem || irq_rx < 0)
2647*4882a593Smuzhiyun 		return -ENODEV;
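	/* note: only the rx irq is mandatory here; irq_tx is stored even
	 * when negative, and the open/stop paths treat -1 as "no
	 * dedicated tx irq".
	 */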
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	ret = 0;
2650*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(*priv));
2651*4882a593Smuzhiyun 	if (!dev)
2652*4882a593Smuzhiyun 		return -ENOMEM;
2653*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun 	/* initialize default and fetch platform data */
2656*4882a593Smuzhiyun 	priv->enet_is_sw = true;
2657*4882a593Smuzhiyun 	priv->irq_rx = irq_rx;
2658*4882a593Smuzhiyun 	priv->irq_tx = irq_tx;
2659*4882a593Smuzhiyun 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2660*4882a593Smuzhiyun 	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2661*4882a593Smuzhiyun 	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	pd = dev_get_platdata(&pdev->dev);
2664*4882a593Smuzhiyun 	if (pd) {
2665*4882a593Smuzhiyun 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
2666*4882a593Smuzhiyun 		memcpy(priv->used_ports, pd->used_ports,
2667*4882a593Smuzhiyun 		       sizeof(pd->used_ports));
2668*4882a593Smuzhiyun 		priv->num_ports = pd->num_ports;
2669*4882a593Smuzhiyun 		priv->dma_has_sram = pd->dma_has_sram;
2670*4882a593Smuzhiyun 		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2671*4882a593Smuzhiyun 		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2672*4882a593Smuzhiyun 		priv->dma_chan_width = pd->dma_chan_width;
2673*4882a593Smuzhiyun 	}
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	ret = bcm_enet_change_mtu(dev, dev->mtu);
2676*4882a593Smuzhiyun 	if (ret)
2677*4882a593Smuzhiyun 		goto out;
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun 	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2680*4882a593Smuzhiyun 	if (IS_ERR(priv->base)) {
2681*4882a593Smuzhiyun 		ret = PTR_ERR(priv->base);
2682*4882a593Smuzhiyun 		goto out;
2683*4882a593Smuzhiyun 	}
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2686*4882a593Smuzhiyun 	if (IS_ERR(priv->mac_clk)) {
2687*4882a593Smuzhiyun 		ret = PTR_ERR(priv->mac_clk);
2688*4882a593Smuzhiyun 		goto out;
2689*4882a593Smuzhiyun 	}
2690*4882a593Smuzhiyun 	ret = clk_prepare_enable(priv->mac_clk);
2691*4882a593Smuzhiyun 	if (ret)
2692*4882a593Smuzhiyun 		goto out;
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	priv->rx_chan = 0;
2695*4882a593Smuzhiyun 	priv->tx_chan = 1;
2696*4882a593Smuzhiyun 	spin_lock_init(&priv->rx_lock);
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	/* init rx timeout (used for oom) */
2699*4882a593Smuzhiyun 	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun 	/* register netdevice */
2702*4882a593Smuzhiyun 	dev->netdev_ops = &bcm_enetsw_ops;
2703*4882a593Smuzhiyun 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2704*4882a593Smuzhiyun 	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2705*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	spin_lock_init(&priv->enetsw_mdio_lock);
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	ret = register_netdev(dev);
2710*4882a593Smuzhiyun 	if (ret)
2711*4882a593Smuzhiyun 		goto out_disable_clk;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 	netif_carrier_off(dev);
2714*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
2715*4882a593Smuzhiyun 	priv->pdev = pdev;
2716*4882a593Smuzhiyun 	priv->net_dev = dev;
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun 	return 0;
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun out_disable_clk:
2721*4882a593Smuzhiyun 	clk_disable_unprepare(priv->mac_clk);
2722*4882a593Smuzhiyun out:
2723*4882a593Smuzhiyun 	free_netdev(dev);
2724*4882a593Smuzhiyun 	return ret;
2725*4882a593Smuzhiyun }
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun /* exit func, stops hardware and unregisters netdevice */
2729*4882a593Smuzhiyun static int bcm_enetsw_remove(struct platform_device *pdev)
2730*4882a593Smuzhiyun {
2731*4882a593Smuzhiyun 	struct bcm_enet_priv *priv;
2732*4882a593Smuzhiyun 	struct net_device *dev;
2733*4882a593Smuzhiyun 
2734*4882a593Smuzhiyun 	/* stop netdevice */
2735*4882a593Smuzhiyun 	dev = platform_get_drvdata(pdev);
2736*4882a593Smuzhiyun 	priv = netdev_priv(dev);
2737*4882a593Smuzhiyun 	unregister_netdev(dev);
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 	clk_disable_unprepare(priv->mac_clk);
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun 	free_netdev(dev);
2742*4882a593Smuzhiyun 	return 0;
2743*4882a593Smuzhiyun }
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun struct platform_driver bcm63xx_enetsw_driver = {
2746*4882a593Smuzhiyun 	.probe	= bcm_enetsw_probe,
2747*4882a593Smuzhiyun 	.remove	= bcm_enetsw_remove,
2748*4882a593Smuzhiyun 	.driver	= {
2749*4882a593Smuzhiyun 		.name	= "bcm63xx_enetsw",
2750*4882a593Smuzhiyun 		.owner  = THIS_MODULE,
2751*4882a593Smuzhiyun 	},
2752*4882a593Smuzhiyun };
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun /* reserve & remap memory space shared between all macs */
2755*4882a593Smuzhiyun static int bcm_enet_shared_probe(struct platform_device *pdev)
2756*4882a593Smuzhiyun {
2757*4882a593Smuzhiyun 	void __iomem *p[3];
2758*4882a593Smuzhiyun 	unsigned int i;
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
2763*4882a593Smuzhiyun 		p[i] = devm_platform_ioremap_resource(pdev, i);
2764*4882a593Smuzhiyun 		if (IS_ERR(p[i]))
2765*4882a593Smuzhiyun 			return PTR_ERR(p[i]);
2766*4882a593Smuzhiyun 	}
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun 	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	return 0;
2771*4882a593Smuzhiyun }
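/* note: the three windows mapped above are assumed to be the global
 * dma controller, the per-channel config block and the channel state
 * ram, in that order; the dma access helpers index into
 * bcm_enet_shared_base accordingly.
 */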
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun static int bcm_enet_shared_remove(struct platform_device *pdev)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun 	return 0;
2776*4882a593Smuzhiyun }
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun /* this "shared" driver is needed because both macs share a single
2779*4882a593Smuzhiyun  * address space
2780*4882a593Smuzhiyun  */
2781*4882a593Smuzhiyun struct platform_driver bcm63xx_enet_shared_driver = {
2782*4882a593Smuzhiyun 	.probe	= bcm_enet_shared_probe,
2783*4882a593Smuzhiyun 	.remove	= bcm_enet_shared_remove,
2784*4882a593Smuzhiyun 	.driver	= {
2785*4882a593Smuzhiyun 		.name	= "bcm63xx_enet_shared",
2786*4882a593Smuzhiyun 		.owner  = THIS_MODULE,
2787*4882a593Smuzhiyun 	},
2788*4882a593Smuzhiyun };
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun static struct platform_driver * const drivers[] = {
2791*4882a593Smuzhiyun 	&bcm63xx_enet_shared_driver,
2792*4882a593Smuzhiyun 	&bcm63xx_enet_driver,
2793*4882a593Smuzhiyun 	&bcm63xx_enetsw_driver,
2794*4882a593Smuzhiyun };
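/* registration order puts the shared driver first so its register
 * windows are normally mapped before either mac probes; the
 * -EPROBE_DEFER check in the probe routines covers the remaining
 * ordering cases.
 */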
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun /* entry point */
2797*4882a593Smuzhiyun static int __init bcm_enet_init(void)
2798*4882a593Smuzhiyun {
2799*4882a593Smuzhiyun 	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun static void __exit bcm_enet_exit(void)
2803*4882a593Smuzhiyun {
2804*4882a593Smuzhiyun 	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2805*4882a593Smuzhiyun }
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun module_init(bcm_enet_init);
2809*4882a593Smuzhiyun module_exit(bcm_enet_exit);
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2812*4882a593Smuzhiyun MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2813*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2814