/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <pci.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include <power/regulator.h>
#include "designware.h"

DECLARE_GLOBAL_DATA_PTR;

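/*
 * MDIO bus accessors: the PHY address and register are packed into the
 * GMAC MII address register together with a CSR clock-range selection
 * and the BUSY bit; the helpers then poll until the controller clears
 * BUSY (or CONFIG_MDIO_TIMEOUT expires) before reading or after writing
 * the MII data register.
 */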
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			u16 val)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

#if defined(CONFIG_DM_ETH) && defined(CONFIG_DM_GPIO)
static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_platdata(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[2]);

	return 0;
}
#endif

static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if defined(CONFIG_DM_ETH) && defined(CONFIG_DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

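/*
 * Descriptor ring setup: both the Tx and Rx rings are built as chained
 * descriptors (each entry's "next" pointer links to the following one,
 * and the last entry points back to the first), so the DMA engine walks
 * the ring without relying on an implicit end-of-ring layout.
 */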
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written there right after the "priv" structure allocation
	 * have been flushed to RAM. Otherwise some of them may get flushed
	 * to RAM while the GMAC is already pushing data to RAM via DMA,
	 * corrupting the data received from the GMAC.
	 */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
				      DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

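/*
 * Mirror the negotiated PHY link parameters into the MAC configuration
 * register: select the MII (10/100) or GMII (1000) port, and set the
 * 100 Mbit/s fast-ethernet speed bit and the full-duplex bit as needed.
 */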
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

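/*
 * Bring-up sequence: soft-reset the DMA engine, re-program the MAC
 * address (the reset clears it), rebuild the Rx/Tx descriptor rings,
 * program the bus and operation modes, start the DMA engine and the
 * PHY, and finally propagate the negotiated link settings to the MAC.
 */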
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * Soft reset above clears HW address registers.
	 * So we have to set it here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

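/*
 * Tx/Rx use the descriptor OWN bit as a handshake with the DMA engine:
 * the CPU may only touch a descriptor while OWN is clear, and hands it
 * back to the hardware by setting OWN (for Tx this is followed by a
 * transmit poll demand). Since descriptors and packet buffers live in
 * cached memory, every CPU access is bracketed by explicit cache
 * invalidates/flushes.
 */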
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)desc_p->dmamac_addr;
	}

	return length;
}

static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush the modified descriptor - only the status field was changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

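/*
 * PHY discovery and setup: scan the MDIO bus (optionally restricted to
 * CONFIG_PHY_ADDR), attach the PHY to the ethernet device, clamp the
 * supported features to the gigabit set and to an optional "max-speed"
 * limit, copy them to the advertised set, then run the generic PHY
 * configuration.
 */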
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int mask = 0xffffffff, ret;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	int ret;

	ret = designware_eth_init(dev->priv, dev->enetaddr);
	if (!ret)
		ret = designware_eth_enable(dev->priv);

	return ret;
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;
	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}

int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	if ((phys_addr_t)priv + sizeof(*priv) > (1ULL << 32)) {
		printf("designware: buffers are outside DMA memory\n");
		return -EINVAL;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif

#ifdef CONFIG_DM_ETH
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
#ifdef CONFIG_DM_PCI
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}
#endif

	return 0;
}

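/*
 * Driver-model probe: optionally enable the "phy-supply" regulator, and
 * for PCI-attached controllers read the register base from BAR0 before
 * pointing the MAC/DMA register structures at it. The MDIO bus
 * registration and PHY discovery are also done at probe time, so .start
 * only needs to reset the MAC and bring the link up.
 */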
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	ulong ioaddr;
	int ret;

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

#ifdef CONFIG_DM_PCI
	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in platdata before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}
#endif

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

	dw_mdio_init(dev->name, dev);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);

	return ret;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

const struct eth_ops designware_eth_ops = {
	.start			= designware_eth_start,
	.send			= designware_eth_send,
	.recv			= designware_eth_recv,
	.free_pkt		= designware_eth_free_pkt,
	.stop			= designware_eth_stop,
	.write_hwaddr		= designware_eth_write_hwaddr,
};

int designware_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_platdata(dev);
#ifdef CONFIG_DM_GPIO
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
	const char *phy_mode;
#ifdef CONFIG_DM_GPIO
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#ifdef CONFIG_DM_GPIO
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
		&priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "altr,socfpga-stmmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "amlogic,meson-gx-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name	= "eth_designware",
	.id	= UCLASS_ETH,
	.of_match = designware_eth_ids,
	.ofdata_to_platdata = designware_eth_ofdata_to_platdata,
	.bind	= designware_eth_bind,
	.probe	= designware_eth_probe,
	.remove	= designware_eth_remove,
	.ops	= &designware_eth_ops,
	.priv_auto_alloc_size = sizeof(struct dw_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
#endif