/*
 * drivers/net/ravb.c
 *     This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017  Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>

/* Needed for the gd->fdt_blob accesses in ravb_ofdata_to_platdata() */
DECLARE_GLOBAL_DATA_PTR;

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC		16
#define RAVB_NUM_TX_DESC		8
#define RAVB_NUM_RX_DESC		8

#define RAVB_TX_QUEUE_OFFSET		0
#define RAVB_RX_QUEUE_OFFSET		4

#define RAVB_DESC_DT(n)			((n) << 28)
#define RAVB_DESC_DT_FSINGLE		RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX		RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS		RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY		RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY		RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK		RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)			(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK		0xfff

#define RAVB_RX_DESC_MSC_MC		BIT(23)
#define RAVB_RX_DESC_MSC_CEEF		BIT(22)
#define RAVB_RX_DESC_MSC_CRL		BIT(21)
#define RAVB_RX_DESC_MSC_FRE		BIT(20)
#define RAVB_RX_DESC_MSC_RTLF		BIT(19)
#define RAVB_RX_DESC_MSC_RTSF		BIT(18)
#define RAVB_RX_DESC_MSC_RFE		BIT(17)
#define RAVB_RX_DESC_MSC_CRC		BIT(16)
#define RAVB_RX_DESC_MSC_MASK		(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS		1000

struct ravb_desc {
	u32	ctrl;
	u32	dptr;
};

struct ravb_rxdesc {
	struct ravb_desc	data;
	struct ravb_desc	link;
	u8			__pad[48];
	u8			packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc	base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc	tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc	rx_desc[RAVB_NUM_RX_DESC];
	u32			rx_desc_idx;
	u32			tx_desc_idx;

	struct phy_device	*phydev;
	struct mii_dev		*bus;
	void __iomem		*iobase;
	struct clk		clk;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

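/*
 * Queue a single frame on TX queue 0 and busy-wait (up to
 * RAVB_TX_TIMEOUT_MS) until the AVB-DMAC hands the descriptor back.
 */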
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

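/*
 * Return the next received frame, or -EAGAIN if the current RX
 * descriptor is still empty or flagged with a receive error.
 */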
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

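/* Recycle the just-consumed RX descriptor and advance to the next one. */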
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

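/* Switch the AVB-DMAC into CONFIG mode and wait for the mode change. */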
static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check the operating mode is changed to the config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

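/* Set up the descriptor base address table shared by all queues. */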
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

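/* Build the TX descriptor ring and link it into the base address table. */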
static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

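/* Build the RX descriptor ring and link it into the base address table. */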
static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

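/*
 * Find the PHY on the bit-banged MDIO bus, connect it to this device and
 * restrict the advertised link modes to what the E-MAC supports.
 */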
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;

	phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	eth->phydev = phydev;

	/* 10BASE is not supported for Ethernet AVB MAC */
	phydev->supported &= ~(SUPPORTED_10baseT_Full
			       | SUPPORTED_10baseT_Half);
	if (pdata->max_speed != 1000) {
		phydev->supported &= ~(SUPPORTED_1000baseT_Half
				       | SUPPORTED_1000baseT_Full);
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC Interrupt */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Recv frame limit set register */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* AVB rx set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns */
	if (pdata->max_speed == 1000)
		writel(BIT(14), eth->iobase + RAVB_REG_APSR);

	return 0;
}

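/*
 * Bring up the AVB-DMAC, E-MAC and PHY, then program the E-MAC speed and
 * duplex settings to match the negotiated link.
 */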
static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	/* Configure phy */
	ret = ravb_phy_config(dev);
	if (ret)
		return ret;

	phy = eth->phydev;

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	/* Apply the PHY-specific extended register tweak, if implemented */
	if (phy->drv->writeext)
		phy->drv->writeext(phy, -1, 0x02, 0x08, (0x0f << 5) | 0x19);

	return 0;
}

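/*
 * .start callback: enable the module clock, reset the DMAC, rebuild all
 * descriptor rings and switch the controller into OPERATION mode.
 */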
int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = clk_enable(&eth->clk);
	if (ret)
		return ret;

	ret = ravb_reset(dev);
	if (ret)
		goto err;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		goto err;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;

err:
	clk_disable(&eth->clk);
	return ret;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	ravb_reset(dev);
	clk_disable(&eth->clk);
}

static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_by_index(dev, 0, &eth->clk);
	if (ret < 0)
		goto err_mdio_alloc;

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	return 0;

err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

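/*
 * Bit-banged MDIO callbacks: the MDC/MDIO lines of the PHY are driven
 * directly through the PIR register.
 */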
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};

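/*
 * Parse the I/O base, PHY mode and maximum speed from the device tree and
 * name the bit-banged MDIO bus after the device.
 */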
int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, "%s", dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name		= "ravb",
	.id		= UCLASS_ETH,
	.of_match	= ravb_ids,
	.ofdata_to_platdata = ravb_ofdata_to_platdata,
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.ops		= &ravb_ops,
	.priv_auto_alloc_size = sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
657