xref: /OK3568_Linux_fs/u-boot/drivers/net/bcm-sf2-eth-gmac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2014-2017 Broadcom.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #ifdef BCM_GMAC_DEBUG
8*4882a593Smuzhiyun #ifndef DEBUG
9*4882a593Smuzhiyun #define DEBUG
10*4882a593Smuzhiyun #endif
11*4882a593Smuzhiyun #endif
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <config.h>
14*4882a593Smuzhiyun #include <common.h>
15*4882a593Smuzhiyun #include <malloc.h>
16*4882a593Smuzhiyun #include <net.h>
17*4882a593Smuzhiyun #include <asm/io.h>
18*4882a593Smuzhiyun #include <phy.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include "bcm-sf2-eth.h"
21*4882a593Smuzhiyun #include "bcm-sf2-eth-gmac.h"
22*4882a593Smuzhiyun 
/*
 * Poll until @exp becomes false or roughly @us microseconds have elapsed,
 * sampling every 10us.  @exp is re-evaluated each iteration and may carry
 * side effects (callers assign into a status variable inside it).
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * and is safe inside an unbraced if/else; the original bare-block form
 * would break there.
 */
#define SPINWAIT(exp, us) do { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) {\
		udelay(10); \
		countdown -= 10; \
	} \
} while (0)
30*4882a593Smuzhiyun 
/*
 * Buffer and descriptor sizes rounded up to the platform DMA alignment
 * (cache-line size) so each object can be flushed/invalidated without
 * touching its neighbours.
 */
#define RX_BUF_SIZE_ALIGNED	ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED	ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED	ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

/* forward declarations: dma_deinit() tears down channels before they exist */
static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);
37*4882a593Smuzhiyun 
/* DMA Descriptor: 64-bit descriptor format consumed by the GMAC DMA engine */
typedef struct {
	/* misc control bits (SOF/EOF/EOT/IOC flags, D64_CTRL1_*) */
	uint32_t	ctrl1;
	/* buffer count and address extension (D64_CTRL2_*) */
	uint32_t	ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t	addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t	addrhigh;
} dma64dd_t;

/* cached copy of the DMA control flags (DMA_CTRL_ROC / DMA_CTRL_PEN) */
uint32_t g_dmactrlflags;
51*4882a593Smuzhiyun 
/*
 * Update the cached DMA control flags: clear the bits in @mask, then set
 * the bits in @flags.  If parity (DMA_CTRL_PEN) ends up enabled, probe the
 * TX control register to verify the controller actually supports parity;
 * if it does not, drop the flag again.  Returns the resulting flag set.
 */
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags = (g_dmactrlflags & ~mask) | flags;

	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t saved = readl(GMAC0_DMA_TX_CTRL_ADDR);

		/* try to set the parity-disable bit and read it back */
		writel(saved | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/* bit sticks -> parity supported; restore register */
			writel(saved, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* parity not implemented; don't allow enabling it */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}
79*4882a593Smuzhiyun 
/* Read-modify-write: clear @value bits in the register at address @reg. */
static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	writel(readl(reg) & ~value, reg);
}
86*4882a593Smuzhiyun 
/* Read-modify-write: set @value bits in the register at address @reg. */
static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	writel(readl(reg) | value, reg);
}
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun #ifdef BCM_GMAC_DEBUG
dma_tx_dump(struct eth_dma * dma)95*4882a593Smuzhiyun static void dma_tx_dump(struct eth_dma *dma)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	dma64dd_t *descp = NULL;
98*4882a593Smuzhiyun 	uint8_t *bufp;
99*4882a593Smuzhiyun 	int i;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	printf("TX DMA Register:\n");
102*4882a593Smuzhiyun 	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
103*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_CTRL_ADDR),
104*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_PTR_ADDR),
105*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
106*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
107*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
108*4882a593Smuzhiyun 	       readl(GMAC0_DMA_TX_STATUS1_ADDR));
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	printf("TX Descriptors:\n");
111*4882a593Smuzhiyun 	for (i = 0; i < TX_BUF_NUM; i++) {
112*4882a593Smuzhiyun 		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
113*4882a593Smuzhiyun 		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
114*4882a593Smuzhiyun 		       descp->ctrl1, descp->ctrl2,
115*4882a593Smuzhiyun 		       descp->addrhigh, descp->addrlow);
116*4882a593Smuzhiyun 	}
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	printf("TX Buffers:\n");
119*4882a593Smuzhiyun 	/* Initialize TX DMA descriptor table */
120*4882a593Smuzhiyun 	for (i = 0; i < TX_BUF_NUM; i++) {
121*4882a593Smuzhiyun 		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
122*4882a593Smuzhiyun 		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
123*4882a593Smuzhiyun 	}
124*4882a593Smuzhiyun 	printf("\n");
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun 
dma_rx_dump(struct eth_dma * dma)127*4882a593Smuzhiyun static void dma_rx_dump(struct eth_dma *dma)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun 	dma64dd_t *descp = NULL;
130*4882a593Smuzhiyun 	uint8_t *bufp;
131*4882a593Smuzhiyun 	int i;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	printf("RX DMA Register:\n");
134*4882a593Smuzhiyun 	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
135*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_CTRL_ADDR),
136*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_PTR_ADDR),
137*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
138*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
139*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
140*4882a593Smuzhiyun 	       readl(GMAC0_DMA_RX_STATUS1_ADDR));
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	printf("RX Descriptors:\n");
143*4882a593Smuzhiyun 	for (i = 0; i < RX_BUF_NUM; i++) {
144*4882a593Smuzhiyun 		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
145*4882a593Smuzhiyun 		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
146*4882a593Smuzhiyun 		       descp->ctrl1, descp->ctrl2,
147*4882a593Smuzhiyun 		       descp->addrhigh, descp->addrlow);
148*4882a593Smuzhiyun 	}
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	printf("RX Buffers:\n");
151*4882a593Smuzhiyun 	for (i = 0; i < RX_BUF_NUM; i++) {
152*4882a593Smuzhiyun 		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
153*4882a593Smuzhiyun 		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
154*4882a593Smuzhiyun 	}
155*4882a593Smuzhiyun 	printf("\n");
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun #endif
158*4882a593Smuzhiyun 
/*
 * Build the TX descriptor ring and program its base into the TX DMA
 * channel.  Each of the TX_BUF_NUM descriptors points at a fixed,
 * cache-aligned buffer; the last descriptor carries the end-of-table flag
 * so the engine wraps.  Always returns 0.
 */
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM-1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		/* NOTE(review): CPU pointer written directly as DMA address;
		 * assumes a 1:1 virt/phys mapping on this SoC -- confirm */
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer so the DMA engine sees current data */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel with the ring base address */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor: ptr == base means ring empty */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}
210*4882a593Smuzhiyun 
dma_rx_init(struct eth_dma * dma)211*4882a593Smuzhiyun static int dma_rx_init(struct eth_dma *dma)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	uint32_t last_desc;
214*4882a593Smuzhiyun 	dma64dd_t *descp = NULL;
215*4882a593Smuzhiyun 	uint8_t *bufp;
216*4882a593Smuzhiyun 	uint32_t ctrl;
217*4882a593Smuzhiyun 	int i;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	debug("%s enter\n", __func__);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	/* clear descriptor memory */
222*4882a593Smuzhiyun 	memset((void *)(dma->rx_desc_aligned), 0,
223*4882a593Smuzhiyun 	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
224*4882a593Smuzhiyun 	/* clear buffer memory */
225*4882a593Smuzhiyun 	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	/* Initialize RX DMA descriptor table */
228*4882a593Smuzhiyun 	for (i = 0; i < RX_BUF_NUM; i++) {
229*4882a593Smuzhiyun 		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
230*4882a593Smuzhiyun 		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
231*4882a593Smuzhiyun 		ctrl = 0;
232*4882a593Smuzhiyun 		/* if last descr set endOfTable */
233*4882a593Smuzhiyun 		if (i == (RX_BUF_NUM - 1))
234*4882a593Smuzhiyun 			ctrl = D64_CTRL1_EOT;
235*4882a593Smuzhiyun 		descp->ctrl1 = ctrl;
236*4882a593Smuzhiyun 		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
237*4882a593Smuzhiyun 		descp->addrlow = (uint32_t)bufp;
238*4882a593Smuzhiyun 		descp->addrhigh = 0;
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
241*4882a593Smuzhiyun 				+ sizeof(dma64dd_t);
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	descp = dma->rx_desc_aligned;
245*4882a593Smuzhiyun 	bufp = dma->rx_buf;
246*4882a593Smuzhiyun 	/* flush descriptor and buffer */
247*4882a593Smuzhiyun 	flush_dcache_range((unsigned long)descp,
248*4882a593Smuzhiyun 			   (unsigned long)descp +
249*4882a593Smuzhiyun 			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
250*4882a593Smuzhiyun 	flush_dcache_range((unsigned long)(bufp),
251*4882a593Smuzhiyun 			   (unsigned long)bufp +
252*4882a593Smuzhiyun 			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	/* initailize the DMA channel */
255*4882a593Smuzhiyun 	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
256*4882a593Smuzhiyun 	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	/* now update the dma last descriptor */
259*4882a593Smuzhiyun 	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	return 0;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
dma_init(struct eth_dma * dma)264*4882a593Smuzhiyun static int dma_init(struct eth_dma *dma)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	debug(" %s enter\n", __func__);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	/*
269*4882a593Smuzhiyun 	 * Default flags: For backwards compatibility both
270*4882a593Smuzhiyun 	 * Rx Overflow Continue and Parity are DISABLED.
271*4882a593Smuzhiyun 	 */
272*4882a593Smuzhiyun 	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	debug("rx burst len 0x%x\n",
275*4882a593Smuzhiyun 	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
276*4882a593Smuzhiyun 	      >> D64_RC_BL_SHIFT);
277*4882a593Smuzhiyun 	debug("tx burst len 0x%x\n",
278*4882a593Smuzhiyun 	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
279*4882a593Smuzhiyun 	      >> D64_XC_BL_SHIFT);
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	dma_tx_init(dma);
282*4882a593Smuzhiyun 	dma_rx_init(dma);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	/* From end of chip_init() */
285*4882a593Smuzhiyun 	/* enable the overflow continue feature and disable parity */
286*4882a593Smuzhiyun 	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
287*4882a593Smuzhiyun 		      DMA_CTRL_ROC /* value */);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	return 0;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
dma_deinit(struct eth_dma * dma)292*4882a593Smuzhiyun static int dma_deinit(struct eth_dma *dma)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	debug(" %s enter\n", __func__);
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	gmac_disable_dma(dma, MAC_DMA_RX);
297*4882a593Smuzhiyun 	gmac_disable_dma(dma, MAC_DMA_TX);
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	free(dma->tx_buf);
300*4882a593Smuzhiyun 	dma->tx_buf = NULL;
301*4882a593Smuzhiyun 	free(dma->tx_desc_aligned);
302*4882a593Smuzhiyun 	dma->tx_desc_aligned = NULL;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	free(dma->rx_buf);
305*4882a593Smuzhiyun 	dma->rx_buf = NULL;
306*4882a593Smuzhiyun 	free(dma->rx_desc_aligned);
307*4882a593Smuzhiyun 	dma->rx_desc_aligned = NULL;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	return 0;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
/*
 * Queue one frame for transmission: copy @packet (@length bytes) into the
 * current TX slot's fixed buffer, fill in the descriptor and advance the
 * DMA last-descriptor pointer so the engine picks it up.  Completion is
 * observed later via gmac_check_tx_done().  Always returns 0.
 */
int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
	uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;

	/* kick off the dma */
	size_t len = length;
	int txout = dma->cur_tx_index;
	uint32_t flags;
	dma64dd_t *descp = NULL;
	uint32_t ctrl;
	/*
	 * NOTE(review): initial last_desc is always "base + one descriptor",
	 * not "descriptor txout + 1".  This is only correct because slot 0
	 * and the wrap case below are the only possibilities when
	 * TX_BUF_NUM == 2; confirm before ever growing the TX ring.
	 */
	uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
			      sizeof(dma64dd_t)) & D64_XP_LD_MASK;
	size_t buflen;

	debug("%s enter\n", __func__);

	/* load the buffer */
	memcpy(bufp, packet, len);

	/* Add 4 bytes for Ethernet FCS/CRC */
	buflen = len + 4;

	ctrl = (buflen & D64_CTRL2_BC_MASK);

	/* the transmit will only be one frame or set SOF, EOF */
	/* also set int on completion */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

	/* txout points to the descriptor to use */
	/* if last descriptor then set EOT and wrap the last-descr pointer */
	if (txout == (TX_BUF_NUM - 1)) {
		flags |= D64_CTRL1_EOT;
		last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
	}

	/* write the descriptor */
	descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	descp->ctrl1 = flags;
	descp->ctrl2 = ctrl;

	/* flush descriptor and buffer so the engine reads current data */
	flush_dcache_range((unsigned long)dma->tx_desc_aligned,
			   (unsigned long)dma->tx_desc_aligned +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);

	/* now update the dma last descriptor: this starts the transfer */
	writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

	/* tx dma should be enabled so packet should go out */

	/* update txout (mask works because TX_BUF_NUM is a power of two) */
	dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

	return 0;
}
371*4882a593Smuzhiyun 
gmac_check_tx_done(struct eth_dma * dma)372*4882a593Smuzhiyun bool gmac_check_tx_done(struct eth_dma *dma)
373*4882a593Smuzhiyun {
374*4882a593Smuzhiyun 	/* wait for tx to complete */
375*4882a593Smuzhiyun 	uint32_t intstatus;
376*4882a593Smuzhiyun 	bool xfrdone = false;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	debug("%s enter\n", __func__);
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	intstatus = readl(GMAC0_INT_STATUS_ADDR);
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	debug("int(0x%x)\n", intstatus);
383*4882a593Smuzhiyun 	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
384*4882a593Smuzhiyun 		xfrdone = true;
385*4882a593Smuzhiyun 		/* clear the int bits */
386*4882a593Smuzhiyun 		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
387*4882a593Smuzhiyun 		writel(intstatus, GMAC0_INT_STATUS_ADDR);
388*4882a593Smuzhiyun 	} else {
389*4882a593Smuzhiyun 		debug("Tx int(0x%x)\n", intstatus);
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	return xfrdone;
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun 
/*
 * Poll the RX ring for a received frame.  If the hardware's current
 * descriptor index has moved past our software index, copy the frame data
 * (skipping the HWRXOFF-byte hardware status header) into @buf, recycle
 * the descriptor back onto the ring and advance the last-descriptor
 * pointer.  Returns the received payload length, or -1 if no frame is
 * pending.
 */
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * this api will check if a packet has been received.
	 * If so it will return the address of the buffer and current
	 * descriptor index will be incremented to the
	 * next descriptor. Once done with the frame the buffer should be
	 * added back onto the descriptor and the lastdscr should be updated
	 * to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	/* NOTE(review): the RS0 current-descriptor mask is applied to
	 * STATUS1 as well -- presumably the active-descriptor field shares
	 * the same layout; confirm against the DMA register spec */
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	/* convert byte offsets within the ring into descriptor indices */
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* remove warning: 'active' is only used in the debug print above */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/*
	 * flush descriptor and buffer
	 * NOTE(review): these regions were just written by the DMA engine;
	 * an invalidate (not flush) is what a CPU read would normally need
	 * here -- presumably flush works on this core because the lines are
	 * clean, but verify against U-Boot cache semantics for this SoC.
	 */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	/* advance software index (mask works: RX_BUF_NUM is a power of two) */
	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset (hardware RX status header size) */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	/* first 16-bit word of the HW status header holds the frame length */
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}
479*4882a593Smuzhiyun 
/*
 * Disable one DMA channel (@dir is MAC_DMA_TX or MAC_DMA_RX).
 * The TX path first suspends the engine and waits for it to drain before
 * clearing the enable bit (hardware workaround, see PR notes below).
 * Returns non-zero (true) if the channel reached the disabled state.
 */
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first; SPINWAIT re-reads status into
		 * 'status' on every poll iteration */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		/* fold the final state into a boolean-style result */
		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}
524*4882a593Smuzhiyun 
/*
 * Enable one DMA channel (@dir is MAC_DMA_TX or MAC_DMA_RX), resetting the
 * software ring index and programming the channel control register from
 * the cached g_dmactrlflags (parity, Rx overflow-continue).
 * Always returns 0.
 */
static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
	uint32_t control;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		dma->cur_tx_index = 0;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control = readl(GMAC0_DMA_TX_CTRL_ADDR);

		control |= D64_XC_XE;
		/* parity not enabled -> set the parity-disable bit */
		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_XC_PD;

		writel(control, GMAC0_DMA_TX_CTRL_ADDR);

		/* initailize the DMA channel with the ring base address */
		writel((uint32_t)(dma->tx_desc_aligned),
		       GMAC0_DMA_TX_ADDR_LOW_ADDR);
		writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
	} else {
		dma->cur_rx_index = 0;

		/* keep address-extension bit, set receive-enable */
		control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
			   D64_RC_AE) | D64_RC_RE;

		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (g_dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~D64_RC_BL_MASK;
		/* Keep default Rx burstlen */
		control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
		/* hardware prepends HWRXOFF status bytes to each frame */
		control |= HWRXOFF << D64_RC_RO_SHIFT;

		writel(control, GMAC0_DMA_RX_CTRL_ADDR);

		/*
		 * the rx descriptor ring should have
		 * the addresses set properly;
		 * set the lastdscr for the rx ring
		 * NOTE(review): the stride here is RX_BUF_SIZE_ALIGNED
		 * (buffer size), not sizeof(dma64dd_t) as used everywhere
		 * else for descriptor arithmetic -- looks inconsistent with
		 * dma_rx_init(); verify against the DMA last-descriptor
		 * register semantics before relying on it.
		 */
		writel(((uint32_t)(dma->rx_desc_aligned) +
			(RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
		       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
	}

	return 0;
}
589*4882a593Smuzhiyun 
gmac_mii_busywait(unsigned int timeout)590*4882a593Smuzhiyun bool gmac_mii_busywait(unsigned int timeout)
591*4882a593Smuzhiyun {
592*4882a593Smuzhiyun 	uint32_t tmp = 0;
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	while (timeout > 10) {
595*4882a593Smuzhiyun 		tmp = readl(GMAC_MII_CTRL_ADDR);
596*4882a593Smuzhiyun 		if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
597*4882a593Smuzhiyun 			udelay(10);
598*4882a593Smuzhiyun 			timeout -= 10;
599*4882a593Smuzhiyun 		} else {
600*4882a593Smuzhiyun 			break;
601*4882a593Smuzhiyun 		}
602*4882a593Smuzhiyun 	}
603*4882a593Smuzhiyun 	return tmp & (1 << GMAC_MII_BUSY_SHIFT);
604*4882a593Smuzhiyun }
605*4882a593Smuzhiyun 
gmac_miiphy_read(struct mii_dev * bus,int phyaddr,int devad,int reg)606*4882a593Smuzhiyun int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
607*4882a593Smuzhiyun {
608*4882a593Smuzhiyun 	uint32_t tmp = 0;
609*4882a593Smuzhiyun 	u16 value = 0;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	/* Busy wait timeout is 1ms */
612*4882a593Smuzhiyun 	if (gmac_mii_busywait(1000)) {
613*4882a593Smuzhiyun 		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
614*4882a593Smuzhiyun 		return -1;
615*4882a593Smuzhiyun 	}
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	/* Read operation */
618*4882a593Smuzhiyun 	tmp = GMAC_MII_DATA_READ_CMD;
619*4882a593Smuzhiyun 	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
620*4882a593Smuzhiyun 		(reg << GMAC_MII_PHY_REG_SHIFT);
621*4882a593Smuzhiyun 	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
622*4882a593Smuzhiyun 	writel(tmp, GMAC_MII_DATA_ADDR);
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 	if (gmac_mii_busywait(1000)) {
625*4882a593Smuzhiyun 		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
626*4882a593Smuzhiyun 		return -1;
627*4882a593Smuzhiyun 	}
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
630*4882a593Smuzhiyun 	debug("MII read data 0x%x\n", value);
631*4882a593Smuzhiyun 	return value;
632*4882a593Smuzhiyun }
633*4882a593Smuzhiyun 
/*
 * MII write callback for the PHY framework: write @value to register @reg
 * of PHY @phyaddr (@devad unused by this controller).  Returns 0 on
 * success, -1 if the MDIO bus stayed busy.
 */
int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t cmd;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation: compose command word and start the transaction */
	cmd = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff)
	      | (phyaddr << GMAC_MII_PHY_ADDR_SHIFT)
	      | (reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      cmd, phyaddr, reg, value);
	writel(cmd, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}
660*4882a593Smuzhiyun 
/*
 * gmac_init_reset() - put the UniMAC core into software reset.
 *
 * Sets the CC_SR (software reset) bit in the command config register and
 * waits GMAC_RESET_DELAY for the core to settle. Command-config changes
 * elsewhere in this driver are made while this reset is asserted; pair
 * with gmac_clear_reset() to release the core.
 */
void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}
669*4882a593Smuzhiyun 
/*
 * gmac_clear_reset() - release the UniMAC core from software reset.
 *
 * Clears the CC_SR bit set by gmac_init_reset() and waits
 * GMAC_RESET_DELAY for the core to come out of reset.
 */
void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}
678*4882a593Smuzhiyun 
gmac_enable_local(bool en)679*4882a593Smuzhiyun static void gmac_enable_local(bool en)
680*4882a593Smuzhiyun {
681*4882a593Smuzhiyun 	uint32_t cmdcfg;
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun 	debug("%s enter\n", __func__);
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 	/* read command config reg */
686*4882a593Smuzhiyun 	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 	/* put mac in reset */
689*4882a593Smuzhiyun 	gmac_init_reset();
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 	cmdcfg |= CC_SR;
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun 	/* first deassert rx_ena and tx_ena while in reset */
694*4882a593Smuzhiyun 	cmdcfg &= ~(CC_RE | CC_TE);
695*4882a593Smuzhiyun 	/* write command config reg */
696*4882a593Smuzhiyun 	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	/* bring mac out of reset */
699*4882a593Smuzhiyun 	gmac_clear_reset();
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	/* if not enable exit now */
702*4882a593Smuzhiyun 	if (!en)
703*4882a593Smuzhiyun 		return;
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	/* enable the mac transmit and receive paths now */
706*4882a593Smuzhiyun 	udelay(2);
707*4882a593Smuzhiyun 	cmdcfg &= ~CC_SR;
708*4882a593Smuzhiyun 	cmdcfg |= (CC_RE | CC_TE);
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	/* assert rx_ena and tx_ena when out of reset to enable the mac */
711*4882a593Smuzhiyun 	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	return;
714*4882a593Smuzhiyun }
715*4882a593Smuzhiyun 
gmac_enable(void)716*4882a593Smuzhiyun int gmac_enable(void)
717*4882a593Smuzhiyun {
718*4882a593Smuzhiyun 	gmac_enable_local(1);
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	/* clear interrupts */
721*4882a593Smuzhiyun 	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
722*4882a593Smuzhiyun 	return 0;
723*4882a593Smuzhiyun }
724*4882a593Smuzhiyun 
gmac_disable(void)725*4882a593Smuzhiyun int gmac_disable(void)
726*4882a593Smuzhiyun {
727*4882a593Smuzhiyun 	gmac_enable_local(0);
728*4882a593Smuzhiyun 	return 0;
729*4882a593Smuzhiyun }
730*4882a593Smuzhiyun 
gmac_set_speed(int speed,int duplex)731*4882a593Smuzhiyun int gmac_set_speed(int speed, int duplex)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun 	uint32_t cmdcfg;
734*4882a593Smuzhiyun 	uint32_t hd_ena;
735*4882a593Smuzhiyun 	uint32_t speed_cfg;
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	hd_ena = duplex ? 0 : CC_HD;
738*4882a593Smuzhiyun 	if (speed == 1000) {
739*4882a593Smuzhiyun 		speed_cfg = 2;
740*4882a593Smuzhiyun 	} else if (speed == 100) {
741*4882a593Smuzhiyun 		speed_cfg = 1;
742*4882a593Smuzhiyun 	} else if (speed == 10) {
743*4882a593Smuzhiyun 		speed_cfg = 0;
744*4882a593Smuzhiyun 	} else {
745*4882a593Smuzhiyun 		pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
746*4882a593Smuzhiyun 		return -1;
747*4882a593Smuzhiyun 	}
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
750*4882a593Smuzhiyun 	cmdcfg &= ~(CC_ES_MASK | CC_HD);
751*4882a593Smuzhiyun 	cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	printf("Change GMAC speed to %dMB\n", speed);
754*4882a593Smuzhiyun 	debug("GMAC speed cfg 0x%x\n", cmdcfg);
755*4882a593Smuzhiyun 	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	return 0;
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun 
gmac_set_mac_addr(unsigned char * mac)760*4882a593Smuzhiyun int gmac_set_mac_addr(unsigned char *mac)
761*4882a593Smuzhiyun {
762*4882a593Smuzhiyun 	/* set our local address */
763*4882a593Smuzhiyun 	debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
764*4882a593Smuzhiyun 	      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
765*4882a593Smuzhiyun 	writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
766*4882a593Smuzhiyun 	writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	return 0;
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun 
/*
 * gmac_mac_init() - full bring-up of the GMAC0 MAC and its DMA engine.
 * @dev: ethernet device whose priv holds the struct eth_info
 *
 * Sequence: reset the AMAC0 IDM core and select its clocking, reprogram
 * the UniMAC command config register inside a reset window, set up the
 * MDIO master and its clock divisor, configure switch-bypass mode, init
 * the DMA rings, and finally program speed/duplex defaults and the max
 * frame length. The register write order here is hardware-mandated; do
 * not reorder.
 *
 * Return: 0 on success, -1 on DMA init or MDIO failure (DMA resources
 * are released via dma_deinit() on the error path).
 */
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	/*
	 * Clear every mode/override bit, then enable promiscuous mode,
	 * payload-length check and control-frame forwarding. Written to
	 * hardware below while the MAC is held in reset.
	 */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	/* stop forcing the IO pads to their input state */
	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus*/
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0](MDCDIV) with required divisor to set
	 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}
895*4882a593Smuzhiyun 
gmac_add(struct eth_device * dev)896*4882a593Smuzhiyun int gmac_add(struct eth_device *dev)
897*4882a593Smuzhiyun {
898*4882a593Smuzhiyun 	struct eth_info *eth = (struct eth_info *)(dev->priv);
899*4882a593Smuzhiyun 	struct eth_dma *dma = &(eth->dma);
900*4882a593Smuzhiyun 	void *tmp;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	/*
903*4882a593Smuzhiyun 	 * Desc has to be 16-byte aligned. But for dcache flush it must be
904*4882a593Smuzhiyun 	 * aligned to ARCH_DMA_MINALIGN.
905*4882a593Smuzhiyun 	 */
906*4882a593Smuzhiyun 	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
907*4882a593Smuzhiyun 	if (tmp == NULL) {
908*4882a593Smuzhiyun 		printf("%s: Failed to allocate TX desc Buffer\n", __func__);
909*4882a593Smuzhiyun 		return -1;
910*4882a593Smuzhiyun 	}
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	dma->tx_desc_aligned = (void *)tmp;
913*4882a593Smuzhiyun 	debug("TX Descriptor Buffer: %p; length: 0x%x\n",
914*4882a593Smuzhiyun 	      dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
917*4882a593Smuzhiyun 	if (tmp == NULL) {
918*4882a593Smuzhiyun 		printf("%s: Failed to allocate TX Data Buffer\n", __func__);
919*4882a593Smuzhiyun 		free(dma->tx_desc_aligned);
920*4882a593Smuzhiyun 		return -1;
921*4882a593Smuzhiyun 	}
922*4882a593Smuzhiyun 	dma->tx_buf = (uint8_t *)tmp;
923*4882a593Smuzhiyun 	debug("TX Data Buffer: %p; length: 0x%x\n",
924*4882a593Smuzhiyun 	      dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* Desc has to be 16-byte aligned */
927*4882a593Smuzhiyun 	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
928*4882a593Smuzhiyun 	if (tmp == NULL) {
929*4882a593Smuzhiyun 		printf("%s: Failed to allocate RX Descriptor\n", __func__);
930*4882a593Smuzhiyun 		free(dma->tx_desc_aligned);
931*4882a593Smuzhiyun 		free(dma->tx_buf);
932*4882a593Smuzhiyun 		return -1;
933*4882a593Smuzhiyun 	}
934*4882a593Smuzhiyun 	dma->rx_desc_aligned = (void *)tmp;
935*4882a593Smuzhiyun 	debug("RX Descriptor Buffer: %p, length: 0x%x\n",
936*4882a593Smuzhiyun 	      dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun 	tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
939*4882a593Smuzhiyun 	if (tmp == NULL) {
940*4882a593Smuzhiyun 		printf("%s: Failed to allocate RX Data Buffer\n", __func__);
941*4882a593Smuzhiyun 		free(dma->tx_desc_aligned);
942*4882a593Smuzhiyun 		free(dma->tx_buf);
943*4882a593Smuzhiyun 		free(dma->rx_desc_aligned);
944*4882a593Smuzhiyun 		return -1;
945*4882a593Smuzhiyun 	}
946*4882a593Smuzhiyun 	dma->rx_buf = (uint8_t *)tmp;
947*4882a593Smuzhiyun 	debug("RX Data Buffer: %p; length: 0x%x\n",
948*4882a593Smuzhiyun 	      dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	g_dmactrlflags = 0;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	eth->phy_interface = PHY_INTERFACE_MODE_GMII;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	dma->tx_packet = gmac_tx_packet;
955*4882a593Smuzhiyun 	dma->check_tx_done = gmac_check_tx_done;
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	dma->check_rx_done = gmac_check_rx_done;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	dma->enable_dma = gmac_enable_dma;
960*4882a593Smuzhiyun 	dma->disable_dma = gmac_disable_dma;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	eth->miiphy_read = gmac_miiphy_read;
963*4882a593Smuzhiyun 	eth->miiphy_write = gmac_miiphy_write;
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 	eth->mac_init = gmac_mac_init;
966*4882a593Smuzhiyun 	eth->disable_mac = gmac_disable;
967*4882a593Smuzhiyun 	eth->enable_mac = gmac_enable;
968*4882a593Smuzhiyun 	eth->set_mac_addr = gmac_set_mac_addr;
969*4882a593Smuzhiyun 	eth->set_mac_speed = gmac_set_speed;
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	return 0;
972*4882a593Smuzhiyun }
973