1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Driver for the MPC5200 Fast Ethernet Controller
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
5*4882a593Smuzhiyun * now maintained by Sylvain Munaut <tnt@246tNt.com>
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
8*4882a593Smuzhiyun * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
9*4882a593Smuzhiyun * Copyright (C) 2003-2004 MontaVista, Software, Inc.
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * This file is licensed under the terms of the GNU General Public License
12*4882a593Smuzhiyun * version 2. This program is licensed "as is" without any warranty of any
13*4882a593Smuzhiyun * kind, whether express or implied.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/dma-mapping.h>
20*4882a593Smuzhiyun #include <linux/module.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include <linux/kernel.h>
23*4882a593Smuzhiyun #include <linux/types.h>
24*4882a593Smuzhiyun #include <linux/spinlock.h>
25*4882a593Smuzhiyun #include <linux/slab.h>
26*4882a593Smuzhiyun #include <linux/errno.h>
27*4882a593Smuzhiyun #include <linux/init.h>
28*4882a593Smuzhiyun #include <linux/interrupt.h>
29*4882a593Smuzhiyun #include <linux/crc32.h>
30*4882a593Smuzhiyun #include <linux/hardirq.h>
31*4882a593Smuzhiyun #include <linux/delay.h>
32*4882a593Smuzhiyun #include <linux/of_device.h>
33*4882a593Smuzhiyun #include <linux/of_mdio.h>
34*4882a593Smuzhiyun #include <linux/of_net.h>
35*4882a593Smuzhiyun #include <linux/of_platform.h>
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include <linux/netdevice.h>
38*4882a593Smuzhiyun #include <linux/etherdevice.h>
39*4882a593Smuzhiyun #include <linux/ethtool.h>
40*4882a593Smuzhiyun #include <linux/skbuff.h>
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #include <asm/io.h>
43*4882a593Smuzhiyun #include <asm/delay.h>
44*4882a593Smuzhiyun #include <asm/mpc52xx.h>
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun #include <linux/fsl/bestcomm/bestcomm.h>
47*4882a593Smuzhiyun #include <linux/fsl/bestcomm/fec.h>
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #include "fec_mpc52xx.h"
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun #define DRIVER_NAME "mpc52xx-fec"
52*4882a593Smuzhiyun
/* Private driver data structure */
struct mpc52xx_fec_priv {
	struct net_device *ndev;	/* back-pointer to our net_device */
	int duplex;			/* duplex last programmed into the FEC */
	int speed;			/* speed last reported by the PHY */
	int r_irq;			/* BestComm RX task interrupt */
	int t_irq;			/* BestComm TX task interrupt */
	struct mpc52xx_fec __iomem *fec; /* mapped FEC register block */
	struct bcom_task *rx_dmatsk;	/* BestComm RX DMA task */
	struct bcom_task *tx_dmatsk;	/* BestComm TX DMA task */
	spinlock_t lock;		/* guards FEC registers + BestComm rings */
	int msg_enable;			/* netif_msg_* verbosity bitmask */

	/* MDIO link details */
	unsigned int mdio_speed;	/* value written to fec->mii_speed */
	struct device_node *phy_node;	/* DT node of attached PHY, or NULL */
	enum phy_state link;		/* last link state seen by adjust_link */
	int seven_wire_mode;		/* non-zero: 7-wire i/f, no MII mode */
};
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun
/* Forward declarations: irq handlers and start/stop/reset helpers that are
 * referenced before their definitions later in this file. */
static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);

#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1;	/* -1 selects MPC52xx_MESSAGES_DEFAULT */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
86*4882a593Smuzhiyun
mpc52xx_fec_tx_timeout(struct net_device * dev,unsigned int txqueue)87*4882a593Smuzhiyun static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = netdev_priv(dev);
90*4882a593Smuzhiyun unsigned long flags;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun dev_warn(&dev->dev, "transmit timed out\n");
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun spin_lock_irqsave(&priv->lock, flags);
95*4882a593Smuzhiyun mpc52xx_fec_reset(dev);
96*4882a593Smuzhiyun dev->stats.tx_errors++;
97*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->lock, flags);
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun netif_wake_queue(dev);
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun
/*
 * Program the station MAC address into the FEC physical-address registers:
 * the first four bytes go into paddr1, the last two into the upper half of
 * paddr2 together with the FEC_PADDR2_TYPE marker the hardware expects.
 *
 * NOTE(review): the u32/u16 loads assume @mac is at least 2-byte aligned;
 * callers pass dev->dev_addr or sockaddr sa_data — confirm both hold here.
 */
static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
	out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
110*4882a593Smuzhiyun
mpc52xx_fec_set_mac_address(struct net_device * dev,void * addr)111*4882a593Smuzhiyun static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun struct sockaddr *sock = addr;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun mpc52xx_fec_set_paddr(dev, sock->sa_data);
118*4882a593Smuzhiyun return 0;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
mpc52xx_fec_free_rx_buffers(struct net_device * dev,struct bcom_task * s)121*4882a593Smuzhiyun static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun while (!bcom_queue_empty(s)) {
124*4882a593Smuzhiyun struct bcom_fec_bd *bd;
125*4882a593Smuzhiyun struct sk_buff *skb;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
128*4882a593Smuzhiyun dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
129*4882a593Smuzhiyun DMA_FROM_DEVICE);
130*4882a593Smuzhiyun kfree_skb(skb);
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
/*
 * Hand one receive sk_buff to the BestComm RX task: fill in the next
 * buffer descriptor (full-buffer length, DMA address of skb->data) and
 * submit it so the hardware can DMA an incoming frame into it.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error(); a mapping failure would hand a bad address to the
 * DMA engine — worth confirming/fixing against the DMA API rules.
 */
static void
mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct bcom_fec_bd *bd;

	bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
	bd->status = FEC_RX_BUFFER_SIZE;
	bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
				    FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
	bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
}
146*4882a593Smuzhiyun
mpc52xx_fec_alloc_rx_buffers(struct net_device * dev,struct bcom_task * rxtsk)147*4882a593Smuzhiyun static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun struct sk_buff *skb;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun while (!bcom_queue_full(rxtsk)) {
152*4882a593Smuzhiyun skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
153*4882a593Smuzhiyun if (!skb)
154*4882a593Smuzhiyun return -EAGAIN;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /* zero out the initial receive buffers to aid debugging */
157*4882a593Smuzhiyun memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
158*4882a593Smuzhiyun mpc52xx_fec_rx_submit(dev, skb);
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun return 0;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun
/* based on generic_adjust_link from fs_enet-main.c */
/*
 * phylib link-change callback.  Mirrors a duplex change into the FEC
 * r_cntrl/x_cntrl registers and caches speed/duplex/link in priv so that
 * only genuine transitions are logged via phy_print_status().
 */
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != priv->duplex) {
			struct mpc52xx_fec __iomem *fec = priv->fec;
			u32 rcntrl;
			u32 tcntrl;

			new_state = 1;
			priv->duplex = phydev->duplex;

			rcntrl = in_be32(&fec->r_cntrl);
			tcntrl = in_be32(&fec->x_cntrl);

			/* clear both duplex-related bits, then set the one
			 * matching the new mode */
			rcntrl &= ~FEC_RCNTRL_DRT;
			tcntrl &= ~FEC_TCNTRL_FDEN;
			if (phydev->duplex == DUPLEX_FULL)
				tcntrl |= FEC_TCNTRL_FDEN;	/* FD enable */
			else
				rcntrl |= FEC_RCNTRL_DRT;	/* disable Rx on Tx (HD) */

			out_be32(&fec->r_cntrl, rcntrl);
			out_be32(&fec->x_cntrl, tcntrl);
		}

		/* speed is only cached for change reporting; no FEC register
		 * is rewritten here */
		if (phydev->speed != priv->speed) {
			new_state = 1;
			priv->speed = phydev->speed;
		}

		if (priv->link == PHY_DOWN) {
			new_state = 1;
			priv->link = phydev->link;
		}

	} else if (priv->link) {
		/* link went down: invalidate the cached parameters so the
		 * next link-up is treated as a full change */
		new_state = 1;
		priv->link = PHY_DOWN;
		priv->speed = 0;
		priv->duplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}
213*4882a593Smuzhiyun
/*
 * ndo_open: bring the interface up.
 *
 * Order: attach/start the PHY (if the DT provided one), grab the three
 * interrupts (ctrl, RX, TX), reset the BestComm tasks, pre-fill the RX
 * ring, enable DMA, start the FEC and finally open the TX queue.
 *
 * On failure the goto ladder unwinds exactly what was acquired: each
 * label frees the resource taken just before the failing step.
 */
static int mpc52xx_fec_open(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int err = -EBUSY;

	if (priv->phy_node) {
		phydev = of_phy_connect(priv->ndev, priv->phy_node,
					mpc52xx_fec_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(&dev->dev, "of_phy_connect failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	}

	/* ctrl irq is shared with the MDIO code, hence IRQF_SHARED */
	if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
			DRIVER_NAME "_ctrl", dev)) {
		dev_err(&dev->dev, "ctrl interrupt request failed\n");
		goto free_phy;
	}
	if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
			DRIVER_NAME "_rx", dev)) {
		dev_err(&dev->dev, "rx interrupt request failed\n");
		goto free_ctrl_irq;
	}
	if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
			DRIVER_NAME "_tx", dev)) {
		dev_err(&dev->dev, "tx interrupt request failed\n");
		goto free_2irqs;
	}

	bcom_fec_rx_reset(priv->rx_dmatsk);
	bcom_fec_tx_reset(priv->tx_dmatsk);

	err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
	if (err) {
		dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
		goto free_irqs;
	}

	bcom_enable(priv->rx_dmatsk);
	bcom_enable(priv->tx_dmatsk);

	mpc52xx_fec_start(dev);

	netif_start_queue(dev);

	return 0;

	/* error unwind: fall through frees everything acquired so far */
 free_irqs:
	free_irq(priv->t_irq, dev);
 free_2irqs:
	free_irq(priv->r_irq, dev);
 free_ctrl_irq:
	free_irq(dev->irq, dev);
 free_phy:
	if (phydev) {
		phy_stop(phydev);
		phy_disconnect(phydev);
	}

	return err;
}
278*4882a593Smuzhiyun
/*
 * ndo_stop: take the interface down.
 *
 * Reverse of mpc52xx_fec_open(): stop the TX queue, halt the FEC
 * (may_sleep=true — we are in process context), drain the RX ring,
 * release the three interrupts and finally power down/disconnect the PHY.
 */
static int mpc52xx_fec_close(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	netif_stop_queue(dev);

	mpc52xx_fec_stop(dev, true);

	mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

	free_irq(dev->irq, dev);
	free_irq(priv->r_irq, dev);
	free_irq(priv->t_irq, dev);

	if (phydev) {
		/* power down phy */
		phy_stop(phydev);
		phy_disconnect(phydev);
	}

	return 0;
}
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun /* This will only be invoked if your driver is _not_ in XOFF state.
304*4882a593Smuzhiyun * What this means is that you need not check it, and that this
305*4882a593Smuzhiyun * invariant will hold if you make sure that the netif_*_queue()
306*4882a593Smuzhiyun * calls are done at the proper times.
307*4882a593Smuzhiyun */
308*4882a593Smuzhiyun static netdev_tx_t
mpc52xx_fec_start_xmit(struct sk_buff * skb,struct net_device * dev)309*4882a593Smuzhiyun mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = netdev_priv(dev);
312*4882a593Smuzhiyun struct bcom_fec_bd *bd;
313*4882a593Smuzhiyun unsigned long flags;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun if (bcom_queue_full(priv->tx_dmatsk)) {
316*4882a593Smuzhiyun if (net_ratelimit())
317*4882a593Smuzhiyun dev_err(&dev->dev, "transmit queue overrun\n");
318*4882a593Smuzhiyun return NETDEV_TX_BUSY;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun spin_lock_irqsave(&priv->lock, flags);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun bd = (struct bcom_fec_bd *)
324*4882a593Smuzhiyun bcom_prepare_next_buffer(priv->tx_dmatsk);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
327*4882a593Smuzhiyun bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
328*4882a593Smuzhiyun DMA_TO_DEVICE);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun skb_tx_timestamp(skb);
331*4882a593Smuzhiyun bcom_submit_next_buffer(priv->tx_dmatsk, skb);
332*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->lock, flags);
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun if (bcom_queue_full(priv->tx_dmatsk)) {
335*4882a593Smuzhiyun netif_stop_queue(dev);
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun return NETDEV_TX_OK;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: run the TX and RX handlers by hand, each with its own
 * interrupt masked, so netconsole and friends can make progress while
 * normal interrupt delivery is unavailable.
 */
static void mpc52xx_fec_poll_controller(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	disable_irq(priv->t_irq);
	mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
	enable_irq(priv->t_irq);
	disable_irq(priv->r_irq);
	mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
	enable_irq(priv->r_irq);
}
#endif
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun
/* This handles BestComm transmit task interrupts
 *
 * For every completed descriptor: retrieve the skb, undo its DMA mapping
 * and release it.  Ring slots freed here are what allows start_xmit to
 * run again, hence the unconditional netif_wake_queue() at the end.
 */
static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	while (bcom_buffer_done(priv->tx_dmatsk)) {
		struct sk_buff *skb;
		struct bcom_fec_bd *bd;
		skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
					   (struct bcom_bd **)&bd);
		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
				 DMA_TO_DEVICE);

		/* _irq variant: we are in hard-irq context here */
		dev_consume_skb_irq(skb);
	}
	spin_unlock(&priv->lock);

	netif_wake_queue(dev);

	return IRQ_HANDLED;
}
380*4882a593Smuzhiyun
/*
 * BestComm receive task interrupt.
 *
 * For each completed descriptor: on error (or when a replacement skb
 * cannot be allocated) the buffer is resubmitted as-is and the packet
 * dropped; otherwise a fresh skb replaces it on the ring and the received
 * one is passed up the stack.  The lock is dropped around the network
 * stack call, so ring state must not be assumed unchanged across it.
 */
static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct sk_buff *rskb;	/* received sk_buff */
	struct sk_buff *skb;	/* new sk_buff to enqueue in its place */
	struct bcom_fec_bd *bd;
	u32 status, physaddr;
	int length;

	spin_lock(&priv->lock);

	while (bcom_buffer_done(priv->rx_dmatsk)) {

		rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
					    (struct bcom_bd **)&bd);
		/* save the DMA address now: bd may be reused by rx_submit */
		physaddr = bd->skb_pa;

		/* Test for errors in received frame */
		if (status & BCOM_FEC_RX_BD_ERRORS) {
			/* Drop packet and reuse the buffer */
			mpc52xx_fec_rx_submit(dev, rskb);
			dev->stats.rx_dropped++;
			continue;
		}

		/* skbs are allocated on open, so now we allocate a new one,
		 * and remove the old (with the packet) */
		skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
		if (!skb) {
			/* Can't get a new one : reuse the same & drop pkt */
			dev_notice(&dev->dev, "Low memory - dropped packet.\n");
			mpc52xx_fec_rx_submit(dev, rskb);
			dev->stats.rx_dropped++;
			continue;
		}

		/* Enqueue the new sk_buff back on the hardware */
		mpc52xx_fec_rx_submit(dev, skb);

		/* Process the received skb - Drop the spin lock while
		 * calling into the network stack */
		spin_unlock(&priv->lock);

		dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
				 DMA_FROM_DEVICE);
		length = status & BCOM_FEC_RX_BD_LEN_MASK;
		skb_put(rskb, length - 4);	/* length without CRC32 */
		rskb->protocol = eth_type_trans(rskb, dev);
		if (!skb_defer_rx_timestamp(rskb))
			netif_rx(rskb);

		spin_lock(&priv->lock);
	}

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
440*4882a593Smuzhiyun
/*
 * FEC control interrupt (shared line).
 *
 * Reads and acknowledges the pending event bits (MII events are handled
 * by the MDIO code and masked out here).  A FIFO error is fatal for the
 * current state, so the controller is soft-reset under the lock.
 */
static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	u32 ievent;

	ievent = in_be32(&fec->ievent);

	ievent &= ~FEC_IEVENT_MII;	/* mii is handled separately */
	if (!ievent)
		return IRQ_NONE;	/* not ours: line is shared */

	out_be32(&fec->ievent, ievent);	/* clear pending events */

	/* on fifo error, soft-reset fec */
	if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {

		if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
			dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
		if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
			dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");

		spin_lock(&priv->lock);
		mpc52xx_fec_reset(dev);
		spin_unlock(&priv->lock);

		return IRQ_HANDLED;
	}

	/* anything other than a plain TX-done event is worth tracing */
	if (ievent & ~FEC_IEVENT_TFINT)
		dev_dbg(&dev->dev, "ievent: %08x\n", ievent);

	return IRQ_HANDLED;
}
476*4882a593Smuzhiyun
/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 *
 * All counters come straight from the hardware RMON/IEEE counter block;
 * dev->stats fields are overwritten (not accumulated) on each call.
 */
static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct mpc52xx_fec __iomem *fec = priv->fec;

	stats->rx_bytes = in_be32(&fec->rmon_r_octets);
	stats->rx_packets = in_be32(&fec->rmon_r_packets);
	stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
		in_be32(&fec->rmon_r_undersize) +
		in_be32(&fec->rmon_r_oversize) +
		in_be32(&fec->rmon_r_frag) +
		in_be32(&fec->rmon_r_jab);

	stats->tx_bytes = in_be32(&fec->rmon_t_octets);
	stats->tx_packets = in_be32(&fec->rmon_t_packets);
	stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
		in_be32(&fec->rmon_t_undersize) +
		in_be32(&fec->rmon_t_oversize) +
		in_be32(&fec->rmon_t_frag) +
		in_be32(&fec->rmon_t_jab);

	stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
	stats->collisions = in_be32(&fec->rmon_t_col);

	/* detailed rx_errors: */
	stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
					+ in_be32(&fec->rmon_r_oversize)
					+ in_be32(&fec->rmon_r_frag)
					+ in_be32(&fec->rmon_r_jab);
	stats->rx_over_errors = in_be32(&fec->r_macerr);
	stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
	stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
	/* rmon_r_drop feeds both fields: no separate missed counter exists */
	stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
	stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);

	/* detailed tx_errors: */
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
	stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
	stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
	stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);

	return stats;
}
526*4882a593Smuzhiyun
/*
 * Read MIB counters in order to reset them,
 * then zero all the stats fields in memory
 *
 * The MIB block must be disabled while its counter memory is cleared;
 * writing 0 to mib_control re-enables counting afterwards.
 */
static void mpc52xx_fec_reset_stats(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	out_be32(&fec->mib_control, FEC_MIB_DISABLE);
	/* clear the whole counter region: rmon_t_drop up to reserved10 */
	memset_io(&fec->rmon_t_drop, 0,
		  offsetof(struct mpc52xx_fec, reserved10) -
		  offsetof(struct mpc52xx_fec, rmon_t_drop));
	out_be32(&fec->mib_control, 0);

	memset(&dev->stats, 0, sizeof(dev->stats));
}
544*4882a593Smuzhiyun
545*4882a593Smuzhiyun /*
546*4882a593Smuzhiyun * Set or clear the multicast filter for this adaptor.
547*4882a593Smuzhiyun */
mpc52xx_fec_set_multicast_list(struct net_device * dev)548*4882a593Smuzhiyun static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
549*4882a593Smuzhiyun {
550*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = netdev_priv(dev);
551*4882a593Smuzhiyun struct mpc52xx_fec __iomem *fec = priv->fec;
552*4882a593Smuzhiyun u32 rx_control;
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun rx_control = in_be32(&fec->r_cntrl);
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) {
557*4882a593Smuzhiyun rx_control |= FEC_RCNTRL_PROM;
558*4882a593Smuzhiyun out_be32(&fec->r_cntrl, rx_control);
559*4882a593Smuzhiyun } else {
560*4882a593Smuzhiyun rx_control &= ~FEC_RCNTRL_PROM;
561*4882a593Smuzhiyun out_be32(&fec->r_cntrl, rx_control);
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun if (dev->flags & IFF_ALLMULTI) {
564*4882a593Smuzhiyun out_be32(&fec->gaddr1, 0xffffffff);
565*4882a593Smuzhiyun out_be32(&fec->gaddr2, 0xffffffff);
566*4882a593Smuzhiyun } else {
567*4882a593Smuzhiyun u32 crc;
568*4882a593Smuzhiyun struct netdev_hw_addr *ha;
569*4882a593Smuzhiyun u32 gaddr1 = 0x00000000;
570*4882a593Smuzhiyun u32 gaddr2 = 0x00000000;
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
573*4882a593Smuzhiyun crc = ether_crc_le(6, ha->addr) >> 26;
574*4882a593Smuzhiyun if (crc >= 32)
575*4882a593Smuzhiyun gaddr1 |= 1 << (crc-32);
576*4882a593Smuzhiyun else
577*4882a593Smuzhiyun gaddr2 |= 1 << crc;
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun out_be32(&fec->gaddr1, gaddr1);
580*4882a593Smuzhiyun out_be32(&fec->gaddr2, gaddr2);
581*4882a593Smuzhiyun }
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun }
584*4882a593Smuzhiyun
/**
 * mpc52xx_fec_hw_init
 * @dev: network device
 *
 * Setup various hardware setting, only needed once on start
 *
 * Performs a soft reset (polling until the reset bit self-clears, up to
 * FEC_RESET_DELAY microseconds), then programs pause, FIFO thresholds,
 * CRC generation, address filters and the MII speed divider.
 */
static void mpc52xx_fec_hw_init(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	int i;

	/* Whack a reset. We should wait for this. */
	out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
	for (i = 0; i < FEC_RESET_DELAY; ++i) {
		if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
			break;
		udelay(1);
	}
	if (i == FEC_RESET_DELAY)
		dev_err(&dev->dev, "FEC Reset timeout!\n");

	/* set pause to 0x20 frames */
	out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);

	/* high service request will be deasserted when there's < 7 bytes in fifo
	 * low service request will be deasserted when there's < 4*7 bytes in fifo
	 */
	out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
	out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);

	/* alarm when <= x bytes in FIFO */
	out_be32(&fec->rfifo_alarm, 0x0000030c);
	out_be32(&fec->tfifo_alarm, 0x00000100);

	/* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
	out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);

	/* enable crc generation */
	out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
	out_be32(&fec->iaddr1, 0x00000000);	/* No individual filter */
	out_be32(&fec->iaddr2, 0x00000000);	/* No individual filter */

	/* set phy speed.
	 * this can't be done in phy driver, since it needs to be called
	 * before fec stuff (even on resume) */
	out_be32(&fec->mii_speed, priv->mdio_speed);
}
633*4882a593Smuzhiyun
/**
 * mpc52xx_fec_start
 * @dev: network device
 *
 * This function is called to start or restart the FEC during a link
 * change. This happens on fifo errors or when switching between half
 * and full duplex.
 */
static void mpc52xx_fec_start(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;
	u32 rcntrl;
	u32 tcntrl;
	u32 tmp;

	/* clear sticky error bits */
	tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
	out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);

	/* FIFOs will reset on mpc52xx_fec_enable */
	out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);

	/* Set station address. */
	mpc52xx_fec_set_paddr(dev, dev->dev_addr);

	/* re-apply promisc/multicast filtering after the reset */
	mpc52xx_fec_set_multicast_list(dev);

	/* set max frame len, enable flow control, select mii mode */
	rcntrl = FEC_RX_BUFFER_SIZE << 16;	/* max frame length */
	rcntrl |= FEC_RCNTRL_FCE;

	if (!priv->seven_wire_mode)
		rcntrl |= FEC_RCNTRL_MII_MODE;

	if (priv->duplex == DUPLEX_FULL)
		tcntrl = FEC_TCNTRL_FDEN;	/* FD enable */
	else {
		rcntrl |= FEC_RCNTRL_DRT;	/* disable Rx on Tx (HD) */
		tcntrl = 0;
	}
	out_be32(&fec->r_cntrl, rcntrl);
	out_be32(&fec->x_cntrl, tcntrl);

	/* Clear any outstanding interrupt. */
	out_be32(&fec->ievent, 0xffffffff);

	/* Enable interrupts we wish to service. */
	out_be32(&fec->imask, FEC_IMASK_ENABLE);

	/* And last, enable the transmit and receive processing. */
	out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
	out_be32(&fec->r_des_active, 0x01000000);
}
689*4882a593Smuzhiyun
690*4882a593Smuzhiyun /**
691*4882a593Smuzhiyun * mpc52xx_fec_stop
692*4882a593Smuzhiyun * @dev: network device
693*4882a593Smuzhiyun *
694*4882a593Smuzhiyun * stop all activity on fec and empty dma buffers
695*4882a593Smuzhiyun */
mpc52xx_fec_stop(struct net_device * dev,bool may_sleep)696*4882a593Smuzhiyun static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = netdev_priv(dev);
699*4882a593Smuzhiyun struct mpc52xx_fec __iomem *fec = priv->fec;
700*4882a593Smuzhiyun unsigned long timeout;
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun /* disable all interrupts */
703*4882a593Smuzhiyun out_be32(&fec->imask, 0);
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun /* Disable the rx task. */
706*4882a593Smuzhiyun bcom_disable(priv->rx_dmatsk);
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun /* Wait for tx queue to drain, but only if we're in process context */
709*4882a593Smuzhiyun if (may_sleep) {
710*4882a593Smuzhiyun timeout = jiffies + msecs_to_jiffies(2000);
711*4882a593Smuzhiyun while (time_before(jiffies, timeout) &&
712*4882a593Smuzhiyun !bcom_queue_empty(priv->tx_dmatsk))
713*4882a593Smuzhiyun msleep(100);
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun if (time_after_eq(jiffies, timeout))
716*4882a593Smuzhiyun dev_err(&dev->dev, "queues didn't drain\n");
717*4882a593Smuzhiyun #if 1
718*4882a593Smuzhiyun if (time_after_eq(jiffies, timeout)) {
719*4882a593Smuzhiyun dev_err(&dev->dev, " tx: index: %i, outdex: %i\n",
720*4882a593Smuzhiyun priv->tx_dmatsk->index,
721*4882a593Smuzhiyun priv->tx_dmatsk->outdex);
722*4882a593Smuzhiyun dev_err(&dev->dev, " rx: index: %i, outdex: %i\n",
723*4882a593Smuzhiyun priv->rx_dmatsk->index,
724*4882a593Smuzhiyun priv->rx_dmatsk->outdex);
725*4882a593Smuzhiyun }
726*4882a593Smuzhiyun #endif
727*4882a593Smuzhiyun }
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun bcom_disable(priv->tx_dmatsk);
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun /* Stop FEC */
732*4882a593Smuzhiyun out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun
/* reset fec and bestcomm tasks */
static void mpc52xx_fec_reset(struct net_device *dev)
{
	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
	struct mpc52xx_fec __iomem *fec = priv->fec;

	/* Quiesce without sleeping — this path may run in atomic context */
	mpc52xx_fec_stop(dev, false);

	/* Ack the pending rx fifo status bits, then force a FIFO reset */
	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
	out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);

	/* Drop rx buffers still queued on the DMA task */
	mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

	mpc52xx_fec_hw_init(dev);

	/* Return the bestcomm rings to an empty state */
	bcom_fec_rx_reset(priv->rx_dmatsk);
	bcom_fec_tx_reset(priv->tx_dmatsk);

	/* Refill the rx ring before re-enabling the DMA tasks */
	mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);

	bcom_enable(priv->rx_dmatsk);
	bcom_enable(priv->tx_dmatsk);

	mpc52xx_fec_start(dev);

	netif_wake_queue(dev);
}
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun /* ethtool interface */
765*4882a593Smuzhiyun
mpc52xx_fec_get_msglevel(struct net_device * dev)766*4882a593Smuzhiyun static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = netdev_priv(dev);
769*4882a593Smuzhiyun return priv->msg_enable;
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun
/* ethtool .set_msglevel hook: store the new netif message level */
static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
{
	((struct mpc52xx_fec_priv *)netdev_priv(dev))->msg_enable = level;
}
777*4882a593Smuzhiyun
/* ethtool ops: link state and message level handled locally; link
 * settings are delegated to the attached phylib PHY */
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_msglevel = mpc52xx_fec_get_msglevel,
	.set_msglevel = mpc52xx_fec_set_msglevel,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun
/* net_device ops; ioctls are forwarded to the PHY via phy_do_ioctl */
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
	.ndo_open = mpc52xx_fec_open,
	.ndo_stop = mpc52xx_fec_close,
	.ndo_start_xmit = mpc52xx_fec_start_xmit,
	.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
	.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = phy_do_ioctl,
	.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
	.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mpc52xx_fec_poll_controller,
#endif
};
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun /* ======================================================================== */
804*4882a593Smuzhiyun /* OF Driver */
805*4882a593Smuzhiyun /* ======================================================================== */
806*4882a593Smuzhiyun
mpc52xx_fec_probe(struct platform_device * op)807*4882a593Smuzhiyun static int mpc52xx_fec_probe(struct platform_device *op)
808*4882a593Smuzhiyun {
809*4882a593Smuzhiyun int rv;
810*4882a593Smuzhiyun struct net_device *ndev;
811*4882a593Smuzhiyun struct mpc52xx_fec_priv *priv = NULL;
812*4882a593Smuzhiyun struct resource mem;
813*4882a593Smuzhiyun const u32 *prop;
814*4882a593Smuzhiyun int prop_size;
815*4882a593Smuzhiyun struct device_node *np = op->dev.of_node;
816*4882a593Smuzhiyun const char *mac_addr;
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun phys_addr_t rx_fifo;
819*4882a593Smuzhiyun phys_addr_t tx_fifo;
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun /* Get the ether ndev & it's private zone */
822*4882a593Smuzhiyun ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
823*4882a593Smuzhiyun if (!ndev)
824*4882a593Smuzhiyun return -ENOMEM;
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun priv = netdev_priv(ndev);
827*4882a593Smuzhiyun priv->ndev = ndev;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun /* Reserve FEC control zone */
830*4882a593Smuzhiyun rv = of_address_to_resource(np, 0, &mem);
831*4882a593Smuzhiyun if (rv) {
832*4882a593Smuzhiyun pr_err("Error while parsing device node resource\n");
833*4882a593Smuzhiyun goto err_netdev;
834*4882a593Smuzhiyun }
835*4882a593Smuzhiyun if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
836*4882a593Smuzhiyun pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
837*4882a593Smuzhiyun (unsigned long)resource_size(&mem),
838*4882a593Smuzhiyun sizeof(struct mpc52xx_fec));
839*4882a593Smuzhiyun rv = -EINVAL;
840*4882a593Smuzhiyun goto err_netdev;
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
844*4882a593Smuzhiyun DRIVER_NAME)) {
845*4882a593Smuzhiyun rv = -EBUSY;
846*4882a593Smuzhiyun goto err_netdev;
847*4882a593Smuzhiyun }
848*4882a593Smuzhiyun
849*4882a593Smuzhiyun /* Init ether ndev with what we have */
850*4882a593Smuzhiyun ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
851*4882a593Smuzhiyun ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
852*4882a593Smuzhiyun ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
853*4882a593Smuzhiyun ndev->base_addr = mem.start;
854*4882a593Smuzhiyun SET_NETDEV_DEV(ndev, &op->dev);
855*4882a593Smuzhiyun
856*4882a593Smuzhiyun spin_lock_init(&priv->lock);
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun /* ioremap the zones */
859*4882a593Smuzhiyun priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun if (!priv->fec) {
862*4882a593Smuzhiyun rv = -ENOMEM;
863*4882a593Smuzhiyun goto err_mem_region;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun /* Bestcomm init */
867*4882a593Smuzhiyun rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
868*4882a593Smuzhiyun tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
869*4882a593Smuzhiyun
870*4882a593Smuzhiyun priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
871*4882a593Smuzhiyun priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
874*4882a593Smuzhiyun pr_err("Can not init SDMA tasks\n");
875*4882a593Smuzhiyun rv = -ENOMEM;
876*4882a593Smuzhiyun goto err_rx_tx_dmatsk;
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun /* Get the IRQ we need one by one */
880*4882a593Smuzhiyun /* Control */
881*4882a593Smuzhiyun ndev->irq = irq_of_parse_and_map(np, 0);
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun /* RX */
884*4882a593Smuzhiyun priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
885*4882a593Smuzhiyun
886*4882a593Smuzhiyun /* TX */
887*4882a593Smuzhiyun priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun /*
890*4882a593Smuzhiyun * MAC address init:
891*4882a593Smuzhiyun *
892*4882a593Smuzhiyun * First try to read MAC address from DT
893*4882a593Smuzhiyun */
894*4882a593Smuzhiyun mac_addr = of_get_mac_address(np);
895*4882a593Smuzhiyun if (!IS_ERR(mac_addr)) {
896*4882a593Smuzhiyun ether_addr_copy(ndev->dev_addr, mac_addr);
897*4882a593Smuzhiyun } else {
898*4882a593Smuzhiyun struct mpc52xx_fec __iomem *fec = priv->fec;
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun /*
901*4882a593Smuzhiyun * If the MAC addresse is not provided via DT then read
902*4882a593Smuzhiyun * it back from the controller regs
903*4882a593Smuzhiyun */
904*4882a593Smuzhiyun *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
905*4882a593Smuzhiyun *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun /*
909*4882a593Smuzhiyun * Check if the MAC address is valid, if not get a random one
910*4882a593Smuzhiyun */
911*4882a593Smuzhiyun if (!is_valid_ether_addr(ndev->dev_addr)) {
912*4882a593Smuzhiyun eth_hw_addr_random(ndev);
913*4882a593Smuzhiyun dev_warn(&ndev->dev, "using random MAC address %pM\n",
914*4882a593Smuzhiyun ndev->dev_addr);
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun
917*4882a593Smuzhiyun priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
918*4882a593Smuzhiyun
919*4882a593Smuzhiyun /*
920*4882a593Smuzhiyun * Link mode configuration
921*4882a593Smuzhiyun */
922*4882a593Smuzhiyun
923*4882a593Smuzhiyun /* Start with safe defaults for link connection */
924*4882a593Smuzhiyun priv->speed = 100;
925*4882a593Smuzhiyun priv->duplex = DUPLEX_HALF;
926*4882a593Smuzhiyun priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
927*4882a593Smuzhiyun
928*4882a593Smuzhiyun /* The current speed preconfigures the speed of the MII link */
929*4882a593Smuzhiyun prop = of_get_property(np, "current-speed", &prop_size);
930*4882a593Smuzhiyun if (prop && (prop_size >= sizeof(u32) * 2)) {
931*4882a593Smuzhiyun priv->speed = prop[0];
932*4882a593Smuzhiyun priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun
935*4882a593Smuzhiyun /* If there is a phy handle, then get the PHY node */
936*4882a593Smuzhiyun priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun /* the 7-wire property means don't use MII mode */
939*4882a593Smuzhiyun if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
940*4882a593Smuzhiyun priv->seven_wire_mode = 1;
941*4882a593Smuzhiyun dev_info(&ndev->dev, "using 7-wire PHY mode\n");
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun /* Hardware init */
945*4882a593Smuzhiyun mpc52xx_fec_hw_init(ndev);
946*4882a593Smuzhiyun mpc52xx_fec_reset_stats(ndev);
947*4882a593Smuzhiyun
948*4882a593Smuzhiyun rv = register_netdev(ndev);
949*4882a593Smuzhiyun if (rv < 0)
950*4882a593Smuzhiyun goto err_node;
951*4882a593Smuzhiyun
952*4882a593Smuzhiyun /* We're done ! */
953*4882a593Smuzhiyun platform_set_drvdata(op, ndev);
954*4882a593Smuzhiyun netdev_info(ndev, "%pOF MAC %pM\n",
955*4882a593Smuzhiyun op->dev.of_node, ndev->dev_addr);
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun return 0;
958*4882a593Smuzhiyun
959*4882a593Smuzhiyun err_node:
960*4882a593Smuzhiyun of_node_put(priv->phy_node);
961*4882a593Smuzhiyun irq_dispose_mapping(ndev->irq);
962*4882a593Smuzhiyun err_rx_tx_dmatsk:
963*4882a593Smuzhiyun if (priv->rx_dmatsk)
964*4882a593Smuzhiyun bcom_fec_rx_release(priv->rx_dmatsk);
965*4882a593Smuzhiyun if (priv->tx_dmatsk)
966*4882a593Smuzhiyun bcom_fec_tx_release(priv->tx_dmatsk);
967*4882a593Smuzhiyun iounmap(priv->fec);
968*4882a593Smuzhiyun err_mem_region:
969*4882a593Smuzhiyun release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
970*4882a593Smuzhiyun err_netdev:
971*4882a593Smuzhiyun free_netdev(ndev);
972*4882a593Smuzhiyun
973*4882a593Smuzhiyun return rv;
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun
/* Unbind the driver: undo mpc52xx_fec_probe in reverse order */
static int
mpc52xx_fec_remove(struct platform_device *op)
{
	struct net_device *ndev;
	struct mpc52xx_fec_priv *priv;

	ndev = platform_get_drvdata(op);
	priv = netdev_priv(ndev);

	/* Detach from the networking core first so no new traffic arrives */
	unregister_netdev(ndev);

	/* Drop the phy node reference taken by of_parse_phandle in probe */
	of_node_put(priv->phy_node);
	priv->phy_node = NULL;

	irq_dispose_mapping(ndev->irq);

	/* Tear down the bestcomm DMA tasks */
	bcom_fec_rx_release(priv->rx_dmatsk);
	bcom_fec_tx_release(priv->tx_dmatsk);

	iounmap(priv->fec);

	release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));

	free_netdev(ndev);

	return 0;
}
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun #ifdef CONFIG_PM
/* Legacy platform suspend hook: closing the interface stops DMA and the
 * controller; resume re-opens it, so no register state is saved here */
static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(op);

	if (netif_running(dev))
		mpc52xx_fec_close(dev);

	return 0;
}
1014*4882a593Smuzhiyun
/* Legacy platform resume hook */
static int mpc52xx_fec_of_resume(struct platform_device *op)
{
	struct net_device *dev = platform_get_drvdata(op);

	/* Re-init the hardware unconditionally — register contents
	 * (including the MII speed divider) are presumed lost across
	 * suspend, even if the interface was down */
	mpc52xx_fec_hw_init(dev);
	mpc52xx_fec_reset_stats(dev);

	if (netif_running(dev))
		mpc52xx_fec_open(dev);

	return 0;
}
1027*4882a593Smuzhiyun #endif
1028*4882a593Smuzhiyun
/* Device-tree compatibles this driver binds to */
static const struct of_device_id mpc52xx_fec_match[] = {
	{ .compatible = "fsl,mpc5200b-fec", },
	{ .compatible = "fsl,mpc5200-fec", },
	{ .compatible = "mpc5200-fec", },	/* legacy, without "fsl," prefix */
	{ }
};
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
1037*4882a593Smuzhiyun
/* OF platform driver glue; legacy suspend/resume hooks only with CONFIG_PM */
static struct platform_driver mpc52xx_fec_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mpc52xx_fec_match,
	},
	.probe = mpc52xx_fec_probe,
	.remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
	.suspend = mpc52xx_fec_of_suspend,
	.resume = mpc52xx_fec_of_resume,
#endif
};
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun /* ======================================================================== */
1053*4882a593Smuzhiyun /* Module */
1054*4882a593Smuzhiyun /* ======================================================================== */
1055*4882a593Smuzhiyun
/* Drivers registered together; the MDIO bus driver is listed first —
 * presumably so the MDIO bus exists before the FEC probes (confirm) */
static struct platform_driver * const drivers[] = {
#ifdef CONFIG_FEC_MPC52xx_MDIO
	&mpc52xx_fec_mdio_driver,
#endif
	&mpc52xx_fec_driver,
};
1062*4882a593Smuzhiyun
/* Module entry: register all platform drivers in one call */
static int __init
mpc52xx_fec_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
1068*4882a593Smuzhiyun
/* Module exit: undo the registrations done in mpc52xx_fec_init */
static void __exit
mpc52xx_fec_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun module_init(mpc52xx_fec_init);
1077*4882a593Smuzhiyun module_exit(mpc52xx_fec_exit);
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1080*4882a593Smuzhiyun MODULE_AUTHOR("Dale Farnsworth");
1081*4882a593Smuzhiyun MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
1082