/*
 * Copyright (C) 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2011 PetaLogix
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <dm.h>
#include <net.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>

DECLARE_GLOBAL_DATA_PTR;

/* Link setup */
#define XAE_EMMC_LINKSPEED_MASK	0xC0000000 /* Link speed */
#define XAE_EMMC_LINKSPD_10	0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100	0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000	0x80000000 /* Link Speed mask for 1000 Mbit */

/* Interrupt Status/Enable/Mask Registers bit definitions */
#define XAE_INT_RXRJECT_MASK	0x00000008 /* Rx frame rejected */
#define XAE_INT_MGTRDY_MASK	0x00000080 /* MGT clock lock */

/* Receive Configuration Word 1 (RCW1) Register bit definitions */
#define XAE_RCW1_RX_MASK	0x10000000 /* Receiver enable */

/* Transmitter Configuration (TC) Register bit definitions */
#define XAE_TC_TX_MASK		0x10000000 /* Transmitter enable */

#define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF

/* MDIO Management Configuration (MC) Register bit definitions */
#define XAE_MDIO_MC_MDIOEN_MASK		0x00000040 /* MII management enable */

/* MDIO Management Control Register (MCR) Register bit definitions */
#define XAE_MDIO_MCR_PHYAD_MASK		0x1F000000 /* Phy Address Mask */
#define XAE_MDIO_MCR_PHYAD_SHIFT	24	   /* Phy Address Shift */
#define XAE_MDIO_MCR_REGAD_MASK		0x001F0000 /* Reg Address Mask */
#define XAE_MDIO_MCR_REGAD_SHIFT	16	   /* Reg Address Shift */
#define XAE_MDIO_MCR_OP_READ_MASK	0x00008000 /* Op Code Read Mask */
#define XAE_MDIO_MCR_OP_WRITE_MASK	0x00004000 /* Op Code Write Mask */
#define XAE_MDIO_MCR_INITIATE_MASK	0x00000800 /* Initiate Mask */
#define XAE_MDIO_MCR_READY_MASK		0x00000080 /* Ready Mask */
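
/*
 * Worked example (illustrative, derived from the masks above): an MDIO
 * read of register 0x02 on the PHY at address 0x07 is started by writing
 *
 *   (0x07 << XAE_MDIO_MCR_PHYAD_SHIFT) | (0x02 << XAE_MDIO_MCR_REGAD_SHIFT)
 *     | XAE_MDIO_MCR_INITIATE_MASK | XAE_MDIO_MCR_OP_READ_MASK
 *   = 0x07000000 | 0x00020000 | 0x00000800 | 0x00008000 = 0x07028800
 *
 * to mdio_mcr; this is exactly the word phyread() below composes before
 * polling XAE_MDIO_MCR_READY_MASK.
 */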

#define XAE_MDIO_DIV_DFT	29	/* Default MDIO clock divisor */

/* DMA macros */
/* Bitmasks of XAXIDMA_CR_OFFSET register */
#define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */

/* Bitmasks of XAXIDMA_SR_OFFSET register */
#define XAXIDMA_HALTED_MASK	0x00000001 /* DMA channel halted */

/* Bitmask for interrupts */
#define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ALL_MASK	0x00007000 /* All interrupts */

/* Bitmasks of XAXIDMA_BD_CTRL_OFFSET register */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */

#define DMAALIGN	128

static u8 rxframe[PKTSIZE_ALIGN] __attribute((aligned(DMAALIGN)));

/* Reflect dma offsets */
struct axidma_reg {
	u32 control;	/* DMACR */
	u32 status;	/* DMASR */
	u32 current;	/* CURDESC */
	u32 reserved;
	u32 tail;	/* TAILDESC */
};
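
/*
 * The same register block layout serves both DMA channels: the TX (MM2S)
 * channel sits at the base address taken from the "axistream-connected"
 * node and the RX (S2MM) channel follows at a fixed 0x30 byte offset, see
 * axi_emac_ofdata_to_platdata() below.
 */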

/* Private driver structures */
struct axidma_priv {
	struct axidma_reg *dmatx;
	struct axidma_reg *dmarx;
	int phyaddr;
	struct axi_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

/* BD descriptors */
struct axidma_bd {
	u32 next;	/* Next descriptor pointer */
	u32 reserved1;
	u32 phys;	/* Buffer address */
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* Control */
	u32 status;	/* Status */
	u32 app0;
	u32 app1;	/* TX start << 16 | insert */
	u32 app2;	/* TX csum seed */
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};
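
/*
 * Usage note: on receive the DMA core stores the number of bytes actually
 * transferred in the low 16 bits of app4, which axiemac_recv() below reads
 * back as the frame length. On transmit, cntrl carries the buffer length
 * together with the start/end-of-frame flags (see axiemac_send()).
 */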

/* Static BDs - driver uses only one BD */
static struct axidma_bd tx_bd __attribute((aligned(DMAALIGN)));
static struct axidma_bd rx_bd __attribute((aligned(DMAALIGN)));

struct axi_regs {
	u32 reserved[3];
	u32 is;		/* 0xC: Interrupt status */
	u32 reserved2;
	u32 ie;		/* 0x14: Interrupt enable */
	u32 reserved3[251];
	u32 rcw1;	/* 0x404: Rx Configuration Word 1 */
	u32 tc;		/* 0x408: Tx Configuration */
	u32 reserved4;
	u32 emmc;	/* 0x410: EMAC mode configuration */
	u32 reserved5[59];
	u32 mdio_mc;	/* 0x500: MII Management Config */
	u32 mdio_mcr;	/* 0x504: MII Management Control */
	u32 mdio_mwd;	/* 0x508: MII Management Write Data */
	u32 mdio_mrd;	/* 0x50C: MII Management Read Data */
	u32 reserved6[124];
	u32 uaw0;	/* 0x700: Unicast address word 0 */
	u32 uaw1;	/* 0x704: Unicast address word 1 */
};
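
/*
 * Padding arithmetic check (illustrative): reserved[3] covers 0x00-0x0B,
 * putting "is" at 0x0C; reserved3[251] spans 0x18 + 251 * 4 = 0x404, which
 * lines rcw1 up with its documented offset; and reserved6[124] advances
 * 0x510 + 124 * 4 = 0x700, placing uaw0 as annotated.
 */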

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808
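
/*
 * Example (illustrative): a floating MDIO bus reads back 0xFFFF, which the
 * detection loop below rejects explicitly. A typical gigabit PHY reports a
 * BMSR value such as 0x7949, and 0x7949 & PHY_DETECT_MASK equals
 * PHY_DETECT_MASK, so that address is accepted; an all-zero read fails the
 * same test.
 */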

static inline int mdio_wait(struct axi_regs *regs)
{
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (timeout && (!(in_be32(&regs->mdio_mcr)
			     & XAE_MDIO_MCR_READY_MASK))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}
	return 0;
}

static u32 phyread(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		   u16 *val)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_READ_MASK;

	out_be32(&regs->mdio_mcr, mdioctrlreg);

	if (mdio_wait(regs))
		return 1;

	/* Read data */
	*val = in_be32(&regs->mdio_mrd);
	return 0;
}

static u32 phywrite(struct axidma_priv *priv, u32 phyaddress, u32 registernum,
		    u32 data)
{
	struct axi_regs *regs = priv->iobase;
	u32 mdioctrlreg = 0;

	if (mdio_wait(regs))
		return 1;

	mdioctrlreg = ((phyaddress << XAE_MDIO_MCR_PHYAD_SHIFT) &
			XAE_MDIO_MCR_PHYAD_MASK) |
			((registernum << XAE_MDIO_MCR_REGAD_SHIFT)
			& XAE_MDIO_MCR_REGAD_MASK) |
			XAE_MDIO_MCR_INITIATE_MASK |
			XAE_MDIO_MCR_OP_WRITE_MASK;

	/* Write data */
	out_be32(&regs->mdio_mwd, data);

	out_be32(&regs->mdio_mcr, mdioctrlreg);

	if (mdio_wait(regs))
		return 1;

	return 0;
}

static int axiemac_phy_init(struct udevice *dev)
{
	u16 phyreg;
	int i;
	u32 ret;
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev;

	u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Set default MDIO divisor */
	out_be32(&regs->mdio_mc, XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK);

	if (priv->phyaddr == -1) {
		/* Detect the PHY address */
		for (i = 31; i >= 0; i--) {
			ret = phyread(priv, i, PHY_DETECT_REG, &phyreg);
			if (!ret && (phyreg != 0xFFFF) &&
			    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("axiemac: Found valid phy address, %x\n",
				      i);
				break;
			}
		}
	}

	/* Interface - look at tsec */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

/* Setting axi emac and phy to proper setting */
static int setup_phy(struct udevice *dev)
{
	u16 temp;
	u32 speed, emmc_reg, ret;
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	struct phy_device *phydev = priv->phydev;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		/*
		 * In SGMII cases the isolate bit might be set after DMA and
		 * Ethernet resets, hence check it and clear it if set.
		 */
		ret = phyread(priv, priv->phyaddr, MII_BMCR, &temp);
		if (ret)
			return 0;
		if (temp & BMCR_ISOLATE) {
			temp &= ~BMCR_ISOLATE;
			ret = phywrite(priv, priv->phyaddr, MII_BMCR, temp);
			if (ret)
				return 0;
		}
	}

	if (phy_startup(phydev)) {
		printf("axiemac: could not initialize PHY %s\n",
		       phydev->dev->name);
		return 0;
	}
	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	switch (phydev->speed) {
	case 1000:
		speed = XAE_EMMC_LINKSPD_1000;
		break;
	case 100:
		speed = XAE_EMMC_LINKSPD_100;
		break;
	case 10:
		speed = XAE_EMMC_LINKSPD_10;
		break;
	default:
		return 0;
	}

	/* Setup the emac for the phy speed */
	emmc_reg = in_be32(&regs->emmc);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
	emmc_reg |= speed;

	/* Write new speed setting out to Axi Ethernet */
	out_be32(&regs->emmc, emmc_reg);

	/*
	 * Setting the operating speed of the MAC needs a delay. There
	 * doesn't seem to be a register to poll, so please consider this
	 * during your application design.
	 */
	udelay(1);

	return 1;
}

/* STOP DMA transfers */
static void axiemac_stop(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Stop the hardware */
	temp = in_be32(&priv->dmatx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmatx->control, temp);

	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmarx->control, temp);

	debug("axiemac: Halted\n");
}

static int axi_ethernet_init(struct axidma_priv *priv)
{
	struct axi_regs *regs = priv->iobase;
	u32 timeout = 200;

	/*
	 * Check the status of the MgtRdy bit in the interrupt status
	 * registers. This must be done to allow the MGT clock to become stable
	 * for the Sgmii and 1000BaseX PHY interfaces. No other register reads
	 * will be valid until this bit is valid.
	 * The bit is always a 1 for all other PHY interfaces.
	 */
	while (timeout && (!(in_be32(&regs->is) & XAE_INT_MGTRDY_MASK))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	/* Stop the device and reset HW */
	/* Disable interrupts */
	out_be32(&regs->ie, 0);

	/* Disable the receiver */
	out_be32(&regs->rcw1, in_be32(&regs->rcw1) & ~XAE_RCW1_RX_MASK);

	/*
	 * Stopping the receiver in mid-packet causes a dropped packet
	 * indication from HW. Clear it.
	 */
	/* Set the interrupt status register to clear the interrupt */
	out_be32(&regs->is, XAE_INT_RXRJECT_MASK);

	/* Setup HW */
	/* Set default MDIO divisor */
	out_be32(&regs->mdio_mc, XAE_MDIO_DIV_DFT | XAE_MDIO_MC_MDIOEN_MASK);

	debug("axiemac: InitHw done\n");
	return 0;
}

static int axiemac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;

	/* Set the MAC address */
	int val = ((pdata->enetaddr[3] << 24) | (pdata->enetaddr[2] << 16) |
		   (pdata->enetaddr[1] << 8) | (pdata->enetaddr[0]));
	out_be32(&regs->uaw0, val);

	val = (pdata->enetaddr[5] << 8) | pdata->enetaddr[4];
	val |= in_be32(&regs->uaw1) & ~XAE_UAW1_UNICASTADDR_MASK;
	out_be32(&regs->uaw1, val);
	return 0;
}

/* Reset DMA engine */
static void axi_dma_init(struct axidma_priv *priv)
{
	u32 timeout = 500;

	/* Reset the engine so the hardware starts from a known state */
	out_be32(&priv->dmatx->control, XAXIDMA_CR_RESET_MASK);
	out_be32(&priv->dmarx->control, XAXIDMA_CR_RESET_MASK);

	/* At the initialization time, hardware should finish reset quickly */
	while (timeout) {
		/* Check transmit/receive channel */
		/* Reset is done when the reset bit is low */
		if (!((in_be32(&priv->dmatx->control) |
		       in_be32(&priv->dmarx->control))
		      & XAXIDMA_CR_RESET_MASK))
			break;
		timeout--;
	}
	if (!timeout)
		printf("%s: Timeout\n", __func__);
}

static int axiemac_start(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	struct axi_regs *regs = priv->iobase;
	u32 temp;

	debug("axiemac: Init started\n");
	/*
	 * Initialize AXIDMA engine. AXIDMA engine must be initialized before
	 * AxiEthernet. During AXIDMA engine initialization, AXIDMA hardware is
	 * reset, and since AXIDMA reset line is connected to AxiEthernet, this
	 * would ensure a reset of AxiEthernet.
	 */
	axi_dma_init(priv);

	/* Initialize AxiEthernet hardware. */
	if (axi_ethernet_init(priv))
		return -1;

	/* Disable all RX interrupts before RxBD space setup */
	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	out_be32(&priv->dmarx->control, temp);

	/* Start DMA RX channel. Now it's ready to receive data.*/
	out_be32(&priv->dmarx->current, (u32)&rx_bd);

	/* Setup the BD. */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next = (u32)&rx_bd;
	rx_bd.phys = (u32)&rxframe;
	rx_bd.cntrl = sizeof(rxframe);
	/* Flush the last BD so DMA core could see the updates */
	flush_cache((u32)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache can contain uninitialized data */
	flush_cache((u32)&rxframe, sizeof(rxframe));

	/* Start the hardware */
	temp = in_be32(&priv->dmarx->control);
	temp |= XAXIDMA_CR_RUNSTOP_MASK;
	out_be32(&priv->dmarx->control, temp);

	/* Rx BD is ready - start */
	out_be32(&priv->dmarx->tail, (u32)&rx_bd);

	/* Enable TX */
	out_be32(&regs->tc, XAE_TC_TX_MASK);
	/* Enable RX */
	out_be32(&regs->rcw1, XAE_RCW1_RX_MASK);

	/* PHY setup */
	if (!setup_phy(dev)) {
		axiemac_stop(dev);
		return -1;
	}

	debug("axiemac: Init complete\n");
	return 0;
}

static int axiemac_send(struct udevice *dev, void *ptr, int len)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 timeout;

	if (len > PKTSIZE_ALIGN)
		len = PKTSIZE_ALIGN;

	/* Flush packet to main memory to be transferred by DMA */
	flush_cache((u32)ptr, len);

	/* Setup Tx BD */
	memset(&tx_bd, 0, sizeof(tx_bd));
	/* At the end of the ring, link the last BD back to the top */
	tx_bd.next = (u32)&tx_bd;
	tx_bd.phys = (u32)ptr;
	/* Save len */
	tx_bd.cntrl = len | XAXIDMA_BD_CTRL_TXSOF_MASK |
		XAXIDMA_BD_CTRL_TXEOF_MASK;
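
	/*
	 * Example (illustrative): a minimal 60-byte frame yields
	 * cntrl = 60 | 0x08000000 | 0x04000000 = 0x0C00003C, i.e. a single
	 * BD that is both the first and the last fragment of the packet.
	 */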

	/* Flush the last BD so DMA core could see the updates */
	flush_cache((u32)&tx_bd, sizeof(tx_bd));

	if (in_be32(&priv->dmatx->status) & XAXIDMA_HALTED_MASK) {
		u32 temp;
		out_be32(&priv->dmatx->current, (u32)&tx_bd);
		/* Start the hardware */
		temp = in_be32(&priv->dmatx->control);
		temp |= XAXIDMA_CR_RUNSTOP_MASK;
		out_be32(&priv->dmatx->control, temp);
	}

	/* Start transfer */
	out_be32(&priv->dmatx->tail, (u32)&tx_bd);

	/* Wait for transmission to complete */
	debug("axiemac: Waiting for tx to be done\n");
	timeout = 200;
	while (timeout && (!(in_be32(&priv->dmatx->status) &
			     (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))) {
		timeout--;
		udelay(1);
	}
	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	debug("axiemac: Sending complete\n");
	return 0;
}

static int isrxready(struct axidma_priv *priv)
{
	u32 status;

	/* Read pending interrupts */
	status = in_be32(&priv->dmarx->status);

	/* Acknowledge pending interrupts */
	out_be32(&priv->dmarx->status, status & XAXIDMA_IRQ_ALL_MASK);

	/*
	 * If the reception-done or the delay interrupt is asserted, a frame
	 * has landed in the RX BD and is ready to be processed.
	 */
	if ((status & (XAXIDMA_IRQ_DELAY_MASK | XAXIDMA_IRQ_IOC_MASK)))
		return 1;

	return 0;
}

static int axiemac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u32 length;
	struct axidma_priv *priv = dev_get_priv(dev);
	u32 temp;

	/* Wait for an incoming packet */
	if (!isrxready(priv))
		return -1;

	debug("axiemac: RX data ready\n");

	/* Disable IRQ for a moment till packet is handled */
	temp = in_be32(&priv->dmarx->control);
	temp &= ~XAXIDMA_IRQ_ALL_MASK;
	out_be32(&priv->dmarx->control, temp);

	length = rx_bd.app4 & 0xFFFF; /* max length mask */
#ifdef DEBUG
	print_buffer(&rxframe, &rxframe[0], 1, length, 16);
#endif

	*packetp = rxframe;
	return length;
}

static int axiemac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct axidma_priv *priv = dev_get_priv(dev);

#ifdef DEBUG
	/* It is useful to clear buffer to be sure that it is consistent */
	memset(rxframe, 0, sizeof(rxframe));
#endif
	/* Setup RxBD */
	/* Clear the whole buffer and setup it again - all flags are cleared */
	memset(&rx_bd, 0, sizeof(rx_bd));
	rx_bd.next = (u32)&rx_bd;
	rx_bd.phys = (u32)&rxframe;
	rx_bd.cntrl = sizeof(rxframe);

	/* Write bd to HW */
	flush_cache((u32)&rx_bd, sizeof(rx_bd));

	/* It is necessary to flush rxframe because if you don't do it
	 * then cache will contain previous packet */
	flush_cache((u32)&rxframe, sizeof(rxframe));

	/* Rx BD is ready - start again */
	out_be32(&priv->dmarx->tail, (u32)&rx_bd);

	debug("axiemac: RX completed, framelength = %d\n", length);

	return 0;
}

static int axiemac_miiphy_read(struct mii_dev *bus, int addr,
			       int devad, int reg)
{
	int ret;
	u16 value;

	ret = phyread(bus->priv, addr, reg, &value);
	debug("axiemac: Read MII 0x%x, 0x%x, 0x%x, %d\n", addr, reg,
	      value, ret);
	return value;
}

static int axiemac_miiphy_write(struct mii_dev *bus, int addr, int devad,
				int reg, u16 value)
{
	debug("axiemac: Write MII 0x%x, 0x%x, 0x%x\n", addr, reg, value);
	return phywrite(bus->priv, addr, reg, value);
}
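
/*
 * Once the bus is registered in axi_emac_probe() below, these accessors
 * also back U-Boot's generic PHY management shell commands; e.g. assuming
 * a PHY at address 0x7, "mii read 0x7 0x1" should end up in
 * axiemac_miiphy_read() via the registered mii_dev (illustrative;
 * availability depends on the board configuration).
 */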

static int axi_emac_probe(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);
	int ret;

	priv->bus = mdio_alloc();
	priv->bus->read = axiemac_miiphy_read;
	priv->bus->write = axiemac_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev->seq);
	if (ret)
		return ret;

	axiemac_phy_init(dev);

	return 0;
}

static int axi_emac_remove(struct udevice *dev)
{
	struct axidma_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops axi_emac_ops = {
	.start			= axiemac_start,
	.send			= axiemac_send,
	.recv			= axiemac_recv,
	.free_pkt		= axiemac_free_pkt,
	.stop			= axiemac_stop,
	.write_hwaddr		= axiemac_write_hwaddr,
};

static int axi_emac_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct axidma_priv *priv = dev_get_priv(dev);
	int node = dev_of_offset(dev);
	int offset = 0;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)devfdt_get_addr(dev);
	priv->iobase = (struct axi_regs *)pdata->iobase;

	offset = fdtdec_lookup_phandle(gd->fdt_blob, node,
				       "axistream-connected");
	if (offset <= 0) {
		printf("%s: axistream is not found\n", __func__);
		return -EINVAL;
	}
	priv->dmatx = (struct axidma_reg *)fdtdec_get_int(gd->fdt_blob,
							  offset, "reg", 0);
	if (!priv->dmatx) {
		printf("%s: axi_dma register space not found\n", __func__);
		return -EINVAL;
	}
	/* RX channel offset is 0x30 */
	priv->dmarx = (struct axidma_reg *)((u32)priv->dmatx + 0x30);

	priv->phyaddr = -1;

	offset = fdtdec_lookup_phandle(gd->fdt_blob, node, "phy-handle");
	if (offset > 0)
		priv->phyaddr = fdtdec_get_int(gd->fdt_blob, offset, "reg", -1);

	phy_mode = fdt_getprop(gd->fdt_blob, node, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		printf("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	printf("AXI EMAC: %lx, phyaddr %d, interface %s\n", (ulong)priv->iobase,
	       priv->phyaddr, phy_string_for_interface(priv->interface));

	return 0;
}
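
/*
 * A matching device tree fragment might look like this (node names and
 * addresses are illustrative, not taken from any particular board):
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		axistream-connected = <&axi_dma_0>;
 *		phy-handle = <&phy0>;
 *		phy-mode = "gmii";
 *	};
 */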

static const struct udevice_id axi_emac_ids[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a" },
	{ }
};

U_BOOT_DRIVER(axi_emac) = {
	.name	= "axi_emac",
	.id	= UCLASS_ETH,
	.of_match = axi_emac_ids,
	.ofdata_to_platdata = axi_emac_ofdata_to_platdata,
	.probe	= axi_emac_probe,
	.remove	= axi_emac_remove,
	.ops	= &axi_emac_ops,
	.priv_auto_alloc_size = sizeof(struct axidma_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};