/*
 * (c) 2015 Purna Chandra Mandal <purna.mandal@microchip.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 */
#include <common.h>
#include <errno.h>
#include <dm.h>
#include <net.h>
#include <miiphy.h>
#include <console.h>
#include <wait_bit.h>
#include <asm/gpio.h>

#include "pic32_eth.h"

#define MAX_RX_BUF_SIZE	1536
#define MAX_RX_DESCR	PKTBUFSRX
#define MAX_TX_DESCR	2

DECLARE_GLOBAL_DATA_PTR;

struct pic32eth_dev {
	struct eth_dma_desc rxd_ring[MAX_RX_DESCR];
	struct eth_dma_desc txd_ring[MAX_TX_DESCR];
	u32 rxd_idx; /* index of RX desc to read */
	/* regs */
	struct pic32_ectl_regs *ectl_regs;
	struct pic32_emac_regs *emac_regs;
	/* Phy */
	struct phy_device *phydev;
	phy_interface_t phyif;
	u32 phy_addr;
	struct gpio_desc rst_gpio;
};

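/* Default PHY reset hook: boards may override this weak function to
 * match their reset wiring. The default pulses the reset GPIO low for
 * 300 us and then high for another 300 us.
 */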
void __weak board_netphy_reset(void *dev)
{
	struct pic32eth_dev *priv = dev;

	if (!dm_gpio_is_valid(&priv->rst_gpio))
		return;

	/* phy reset */
	dm_gpio_set_value(&priv->rst_gpio, 0);
	udelay(300);
	dm_gpio_set_value(&priv->rst_gpio, 1);
	udelay(300);
}

/* Initialize the MII (MDIO) interface, discover which PHY is
 * attached to the device, and configure it properly.
 */
static int pic32_mii_init(struct pic32eth_dev *priv)
{
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct pic32_emac_regs *emac_p = priv->emac_regs;

	/* board phy reset */
	board_netphy_reset(priv);

	/* disable RX, TX & all transactions */
	writel(ETHCON_ON | ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);

	/* wait until the controller is no longer busy */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  CONFIG_SYS_HZ, false);

	/* turn controller ON to access PHY over MII */
	writel(ETHCON_ON, &ectl_p->con1.set);

	mdelay(10);

	/* reset MAC */
	writel(EMAC_SOFTRESET, &emac_p->cfg1.set); /* reset assert */
	mdelay(10);
	writel(EMAC_SOFTRESET, &emac_p->cfg1.clr); /* reset deassert */

	/* initialize MDIO/MII */
	if (priv->phyif == PHY_INTERFACE_MODE_RMII) {
		writel(EMAC_RMII_RESET, &emac_p->supp.set);
		mdelay(10);
		writel(EMAC_RMII_RESET, &emac_p->supp.clr);
	}

	return pic32_mdio_init(PIC32_MDIO_NAME, (ulong)&emac_p->mii);
}

static int pic32_phy_init(struct pic32eth_dev *priv, struct udevice *dev)
{
	struct mii_dev *mii;

	mii = miiphy_get_dev_by_name(PIC32_MDIO_NAME);

	/* find & connect PHY */
	priv->phydev = phy_connect(mii, priv->phy_addr,
				   dev, priv->phyif);
	if (!priv->phydev) {
		printf("%s: %s: Error, PHY connect\n", __FILE__, __func__);
		return 0;
	}

	/* Wait for phy to complete reset */
	mdelay(10);

	/* configure supported modes */
	priv->phydev->supported = SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_Autoneg;

	priv->phydev->advertising = ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full |
				    ADVERTISED_Autoneg;

	priv->phydev->autoneg = AUTONEG_ENABLE;

	return 0;
}

/* Configure MAC based on negotiated speed and duplex
 * reported by PHY.
 */
static int pic32_mac_adjust_link(struct pic32eth_dev *priv)
{
	struct phy_device *phydev = priv->phydev;
	struct pic32_emac_regs *emac_p = priv->emac_regs;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return -EINVAL;
	}

	if (phydev->duplex) {
		writel(EMAC_FULLDUP, &emac_p->cfg2.set);
		writel(FULLDUP_GAP_TIME, &emac_p->ipgt.raw);
	} else {
		writel(EMAC_FULLDUP, &emac_p->cfg2.clr);
		writel(HALFDUP_GAP_TIME, &emac_p->ipgt.raw);
	}

	switch (phydev->speed) {
	case SPEED_100:
		writel(EMAC_RMII_SPD100, &emac_p->supp.set);
		break;
	case SPEED_10:
		writel(EMAC_RMII_SPD100, &emac_p->supp.clr);
		break;
	default:
		printf("%s: Speed was bad\n", phydev->dev->name);
		return -EINVAL;
	}

	printf("pic32eth: PHY is %s with %dbase%s, %s\n",
	       phydev->drv->name, phydev->speed,
	       (phydev->port == PORT_TP) ? "T" : "X",
	       (phydev->duplex) ? "full" : "half");

	return 0;
}

static void pic32_mac_init(struct pic32eth_dev *priv, u8 *macaddr)
{
	struct pic32_emac_regs *emac_p = priv->emac_regs;
	u32 stat = 0, v;
	u64 expire;

	v = EMAC_TXPAUSE | EMAC_RXPAUSE | EMAC_RXENABLE;
	writel(v, &emac_p->cfg1.raw);

	v = EMAC_EXCESS | EMAC_AUTOPAD | EMAC_PADENABLE |
	    EMAC_CRCENABLE | EMAC_LENGTHCK | EMAC_FULLDUP;
	writel(v, &emac_p->cfg2.raw);

	/* recommended back-to-back inter-packet gap for 10 Mbps half duplex */
	writel(HALFDUP_GAP_TIME, &emac_p->ipgt.raw);

	/* recommended non-back-to-back interpacket gap is 0xc12 */
	writel(0xc12, &emac_p->ipgr.raw);

	/* recommended collision window retry limit is 0x370F */
	writel(0x370f, &emac_p->clrt.raw);

	/* set maximum frame length: allow VLAN tagged frame */
	writel(0x600, &emac_p->maxf.raw);

	/* set the mac address */
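	/* (each station address register holds two octets, low byte first:
	 *  SA2 = octets 0-1, SA1 = octets 2-3, SA0 = octets 4-5)
	 */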
	writel(macaddr[0] | (macaddr[1] << 8), &emac_p->sa2.raw);
	writel(macaddr[2] | (macaddr[3] << 8), &emac_p->sa1.raw);
	writel(macaddr[4] | (macaddr[5] << 8), &emac_p->sa0.raw);

	/* default, enable 10 Mbps operation */
	writel(EMAC_RMII_SPD100, &emac_p->supp.clr);

	/* wait until link status UP or deadline elapsed */
	expire = get_ticks() + get_tbclk() * 2;
	for (; get_ticks() < expire;) {
		stat = phy_read(priv->phydev, priv->phy_addr, MII_BMSR);
		if (stat & BMSR_LSTATUS)
			break;
	}

	if (!(stat & BMSR_LSTATUS))
		printf("MAC: Link is DOWN!\n");

	/* delay to stabilize before any tx/rx */
	mdelay(10);
}

static void pic32_mac_reset(struct pic32eth_dev *priv)
{
	struct pic32_emac_regs *emac_p = priv->emac_regs;
	struct mii_dev *mii;

	/* Reset MAC */
	writel(EMAC_SOFTRESET, &emac_p->cfg1.raw);
	mdelay(10);

	/* clear reset */
	writel(0, &emac_p->cfg1.raw);

	/* Reset MII */
	mii = priv->phydev->bus;
	if (mii && mii->reset)
		mii->reset(mii);
}

/* Reset the Ethernet controller and (re)configure its receive filters. */
static void pic32_ctrl_reset(struct pic32eth_dev *priv)
{
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	u32 v;

	/* disable RX, TX & any other transactions */
	writel(ETHCON_ON | ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);

	/* wait until the controller is no longer busy */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  CONFIG_SYS_HZ, false);
	/* decrement received buffcnt to zero. */
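	/* (each ETHCON_BUFCDEC write decrements the pending RX buffer count
	 *  by one; looping until ETHSTAT_BUFCNT clears discards any buffers
	 *  left over from a previous run)
	 */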
	while (readl(&ectl_p->stat.raw) & ETHSTAT_BUFCNT)
		writel(ETHCON_BUFCDEC, &ectl_p->con1.set);

	/* clear any existing interrupt event */
	writel(0xffffffff, &ectl_p->irq.clr);

	/* clear RX/TX start address */
	writel(0xffffffff, &ectl_p->txst.clr);
	writel(0xffffffff, &ectl_p->rxst.clr);

	/* clear the receive filters */
	writel(0x00ff, &ectl_p->rxfc.clr);

	/* set the receive filters
	 * ETH_FILT_CRC_ERR_REJECT
	 * ETH_FILT_RUNT_REJECT
	 * ETH_FILT_UCAST_ACCEPT
	 * ETH_FILT_MCAST_ACCEPT
	 * ETH_FILT_BCAST_ACCEPT
	 */
	v = ETHRXFC_BCEN | ETHRXFC_MCEN | ETHRXFC_UCEN |
	    ETHRXFC_RUNTEN | ETHRXFC_CRCOKEN;
	writel(v, &ectl_p->rxfc.set);

	/* turn controller ON to access PHY over MII */
	writel(ETHCON_ON, &ectl_p->con1.set);
}

static void pic32_rx_desc_init(struct pic32eth_dev *priv)
{
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct eth_dma_desc *rxd;
	u32 idx, bufsz;

	priv->rxd_idx = 0;
	for (idx = 0; idx < MAX_RX_DESCR; idx++) {
		rxd = &priv->rxd_ring[idx];

		/* hw owned */
		rxd->hdr = EDH_NPV | EDH_EOWN | EDH_STICKY;

		/* packet buffer address */
		rxd->data_buff = virt_to_phys(net_rx_packets[idx]);

		/* link to next desc */
		rxd->next_ed = virt_to_phys(rxd + 1);

		/* reset status */
		rxd->stat1 = 0;
		rxd->stat2 = 0;

		/* decrement bufcnt */
		writel(ETHCON_BUFCDEC, &ectl_p->con1.set);
	}

	/* link last descr to beginning of list */
	rxd->next_ed = virt_to_phys(&priv->rxd_ring[0]);

	/* flush rx ring */
	flush_dcache_range((ulong)priv->rxd_ring,
			   (ulong)priv->rxd_ring + sizeof(priv->rxd_ring));

	/* set rx desc-ring start address */
	writel((ulong)virt_to_phys(&priv->rxd_ring[0]), &ectl_p->rxst.raw);

	/* RX Buffer size */
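	/* (the RXBUFSZ field is programmed in units of 16 bytes, hence the
	 *  division by 16 below)
	 */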
	bufsz = readl(&ectl_p->con2.raw);
	bufsz &= ~(ETHCON_RXBUFSZ << ETHCON_RXBUFSZ_SHFT);
	bufsz |= ((MAX_RX_BUF_SIZE / 16) << ETHCON_RXBUFSZ_SHFT);
	writel(bufsz, &ectl_p->con2.raw);

	/* enable the receiver so the hardware can DMA received packets
	 * into the buffers referenced by the descriptor ring.
	 */
	writel(ETHCON_RXEN, &ectl_p->con1.set);
}

static int pic32_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct pic32eth_dev *priv = dev_get_priv(dev);

	/* controller */
	pic32_ctrl_reset(priv);

	/* reset MAC */
	pic32_mac_reset(priv);

	/* configure PHY */
	phy_config(priv->phydev);

	/* initialize MAC */
	pic32_mac_init(priv, &pdata->enetaddr[0]);

	/* init RX descriptor; TX descriptors are handled in xmit */
	pic32_rx_desc_init(priv);

	/* Start up & update link status of PHY */
	phy_startup(priv->phydev);

	/* adjust mac with phy link status */
	return pic32_mac_adjust_link(priv);
}

static void pic32_eth_stop(struct udevice *dev)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct pic32_emac_regs *emac_p = priv->emac_regs;

	/* Reset the phy if the controller is enabled */
	if (readl(&ectl_p->con1.raw) & ETHCON_ON)
		phy_reset(priv->phydev);

	/* Shut down the PHY */
	phy_shutdown(priv->phydev);

	/* Stop rx/tx */
	writel(ETHCON_TXRTS | ETHCON_RXEN, &ectl_p->con1.clr);
	mdelay(10);

	/* reset MAC */
	writel(EMAC_SOFTRESET, &emac_p->cfg1.raw);

	/* clear reset */
	writel(0, &emac_p->cfg1.raw);
	mdelay(10);

	/* disable controller */
	writel(ETHCON_ON, &ectl_p->con1.clr);
	mdelay(10);

	/* wait until everything is down */
	wait_for_bit_le32(&ectl_p->stat.raw, ETHSTAT_BUSY, false,
			  2 * CONFIG_SYS_HZ, false);

	/* clear any existing interrupt event */
	writel(0xffffffff, &ectl_p->irq.clr);
}

static int pic32_eth_send(struct udevice *dev, void *packet, int length)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct eth_dma_desc *txd;
	u64 deadline;

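	/* Only the first TX descriptor is used: each transmit reprograms it,
	 * hands it to the controller and then polls for completion below.
	 */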
	txd = &priv->txd_ring[0];

	/* set proper flags & length in descriptor header */
	txd->hdr = EDH_SOP | EDH_EOP | EDH_EOWN | EDH_BCOUNT(length);

	/* pass buffer address to hardware */
	txd->data_buff = virt_to_phys(packet);

	debug("%s: %d / .hdr %x, .data_buff %x, .stat %x, .nexted %x\n",
	      __func__, __LINE__, txd->hdr, txd->data_buff, txd->stat2,
	      txd->next_ed);

	/* cache flush (packet) */
	flush_dcache_range((ulong)packet, (ulong)packet + length);

	/* cache flush (txd) */
	flush_dcache_range((ulong)txd, (ulong)txd + sizeof(*txd));

	/* pass descriptor table base to h/w */
	writel(virt_to_phys(txd), &ectl_p->txst.raw);

	/* set TXRTS: hardware can now send the packet(s) */
	writel(ETHCON_TXRTS | ETHCON_ON, &ectl_p->con1.set);

	/* wait until tx has completed and h/w has released ownership
	 * of the tx descriptor or timeout elapsed.
	 */
	deadline = get_ticks() + get_tbclk();
	for (;;) {
		/* check timeout */
		if (get_ticks() > deadline)
			return -ETIMEDOUT;

		if (ctrlc())
			return -EINTR;

		/* tx completed ? */
		if (readl(&ectl_p->con1.raw) & ETHCON_TXRTS) {
			udelay(1);
			continue;
		}

		/* h/w not released ownership yet? */
		invalidate_dcache_range((ulong)txd, (ulong)txd + sizeof(*txd));
		if (!(txd->hdr & EDH_EOWN))
			break;
	}

	return 0;
}


static int pic32_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct eth_dma_desc *rxd;
	u32 idx = priv->rxd_idx;
	u32 rx_count;

	/* find the next ready to receive */
	rxd = &priv->rxd_ring[idx];

	invalidate_dcache_range((ulong)rxd, (ulong)rxd + sizeof(*rxd));
	/* check if owned by MAC */
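	/* (a set EDH_EOWN means the controller still owns this descriptor,
	 *  i.e. no complete frame has been placed in its buffer yet)
	 */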
	if (rxd->hdr & EDH_EOWN)
		return -EAGAIN;

	/* Sanity check on header: SOP and EOP */
	if ((rxd->hdr & (EDH_SOP | EDH_EOP)) != (EDH_SOP | EDH_EOP)) {
		printf("%s: %s, rx pkt across multiple descr\n",
		       __FILE__, __func__);
		return 0;
	}

	debug("%s: %d /idx %i, hdr=%x, data_buff %x, stat %x, nexted %x\n",
	      __func__, __LINE__, idx, rxd->hdr,
	      rxd->data_buff, rxd->stat2, rxd->next_ed);

	/* Sanity check on rx_stat: OK, CRC */
	if (!RSV_RX_OK(rxd->stat2) || RSV_CRC_ERR(rxd->stat2)) {
		debug("%s: %s: Error, rx problem detected\n",
		      __FILE__, __func__);
		return 0;
	}

	/* invalidate dcache */
	rx_count = RSV_RX_COUNT(rxd->stat2);
	invalidate_dcache_range((ulong)net_rx_packets[idx],
				(ulong)net_rx_packets[idx] + rx_count);

	/* Pass the packet to protocol layer */
	*packetp = net_rx_packets[idx];

	/* return number of bytes received, excluding the 4-byte CRC */
	return rx_count - 4;
}

static int pic32_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct pic32_ectl_regs *ectl_p = priv->ectl_regs;
	struct eth_dma_desc *rxd;
	int idx = priv->rxd_idx;

	/* sanity check */
	if (packet != net_rx_packets[idx]) {
		printf("rxd_id %d: packet does not match\n", idx);
		return -EAGAIN;
	}

	/* prepare for receive */
	rxd = &priv->rxd_ring[idx];
	rxd->hdr = EDH_STICKY | EDH_NPV | EDH_EOWN;

	flush_dcache_range((ulong)rxd, (ulong)rxd + sizeof(*rxd));

	/* decrement rx pkt count */
	writel(ETHCON_BUFCDEC, &ectl_p->con1.set);

	debug("%s: %d / idx %i, hdr %x, data_buff %x, stat %x, nexted %x\n",
	      __func__, __LINE__, idx, rxd->hdr, rxd->data_buff,
	      rxd->stat2, rxd->next_ed);

	priv->rxd_idx = (priv->rxd_idx + 1) % MAX_RX_DESCR;

	return 0;
}

static const struct eth_ops pic32_eth_ops = {
	.start = pic32_eth_start,
	.send = pic32_eth_send,
	.recv = pic32_eth_recv,
	.free_pkt = pic32_eth_free_pkt,
	.stop = pic32_eth_stop,
};

static int pic32_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct pic32eth_dev *priv = dev_get_priv(dev);
	const char *phy_mode;
	void __iomem *iobase;
	fdt_addr_t addr;
	fdt_size_t size;
	int offset = 0;
	int phy_addr = -1;

	addr = fdtdec_get_addr_size(gd->fdt_blob, dev_of_offset(dev), "reg",
				    &size);
	if (addr == FDT_ADDR_T_NONE)
		return -EINVAL;

	iobase = ioremap(addr, size);
	pdata->iobase = (phys_addr_t)addr;

	/* get phy mode */
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	/* get phy addr */
	offset = fdtdec_lookup_phandle(gd->fdt_blob, dev_of_offset(dev),
				       "phy-handle");
	if (offset > 0)
		phy_addr = fdtdec_get_int(gd->fdt_blob, offset, "reg", -1);

	/* phy reset gpio */
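	/* (optional: board_netphy_reset() skips the reset pulse when this
	 *  GPIO is not described in the device tree)
	 */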
	gpio_request_by_name_nodev(dev_ofnode(dev), "reset-gpios", 0,
				   &priv->rst_gpio, GPIOD_IS_OUT);

	priv->phyif = pdata->phy_interface;
	priv->phy_addr = phy_addr;
	priv->ectl_regs = iobase;
	priv->emac_regs = iobase + PIC32_EMAC1CFG1;

	pic32_mii_init(priv);

	return pic32_phy_init(priv, dev);
}

static int pic32_eth_remove(struct udevice *dev)
{
	struct pic32eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;

	dm_gpio_free(dev, &priv->rst_gpio);
	phy_shutdown(priv->phydev);
	free(priv->phydev);
	bus = miiphy_get_dev_by_name(PIC32_MDIO_NAME);
	mdio_unregister(bus);
	mdio_free(bus);
	iounmap(priv->ectl_regs);
	return 0;
}

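/*
 * Illustrative device tree node consumed by this driver. The unit
 * address, register size, PHY node and GPIO values below are
 * placeholders, not taken from a real board dts:
 *
 *	ethernet@1f882000 {
 *		compatible = "microchip,pic32mzda-eth";
 *		reg = <0x1f882000 0x1000>;
 *		phy-mode = "rmii";
 *		phy-handle = <&ethernet_phy>;
 *		reset-gpios = <&gpio_port 10 GPIO_ACTIVE_HIGH>;
 *	};
 */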
static const struct udevice_id pic32_eth_ids[] = {
	{ .compatible = "microchip,pic32mzda-eth" },
	{ }
};

U_BOOT_DRIVER(pic32_ethernet) = {
	.name = "pic32_ethernet",
	.id = UCLASS_ETH,
	.of_match = pic32_eth_ids,
	.probe = pic32_eth_probe,
	.remove = pic32_eth_remove,
	.ops = &pic32_eth_ops,
	.priv_auto_alloc_size = sizeof(struct pic32eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};