/*
 * Copyright 2010-2011 Calxeda, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>

#define TX_NUM_DESC			1
#define RX_NUM_DESC			32

#define MAC_TIMEOUT			(5*CONFIG_SYS_HZ)

#define ETH_BUF_SZ			2048
#define TX_BUF_SZ			(ETH_BUF_SZ * TX_NUM_DESC)
#define RX_BUF_SZ			(ETH_BUF_SZ * RX_NUM_DESC)

#define RXSTART				0x00000002
#define TXSTART				0x00002000

#define RXENABLE			0x00000004
#define TXENABLE			0x00000008

#define XGMAC_CONTROL_SPD		0x40000000
#define XGMAC_CONTROL_SPD_MASK		0x60000000
#define XGMAC_CONTROL_SARC		0x10000000
#define XGMAC_CONTROL_SARK_MASK		0x18000000
#define XGMAC_CONTROL_CAR		0x04000000
#define XGMAC_CONTROL_CAR_MASK		0x06000000
#define XGMAC_CONTROL_CAR_SHIFT		25
#define XGMAC_CONTROL_DP		0x01000000
#define XGMAC_CONTROL_WD		0x00800000
#define XGMAC_CONTROL_JD		0x00400000
#define XGMAC_CONTROL_JE		0x00100000
#define XGMAC_CONTROL_LM		0x00001000
#define XGMAC_CONTROL_IPC		0x00000400
#define XGMAC_CONTROL_ACS		0x00000080
#define XGMAC_CONTROL_DDIC		0x00000010
#define XGMAC_CONTROL_TE		0x00000008
#define XGMAC_CONTROL_RE		0x00000004

#define XGMAC_DMA_BUSMODE_RESET		0x00000001
#define XGMAC_DMA_BUSMODE_DSL		0x00000004
#define XGMAC_DMA_BUSMODE_DSL_MASK	0x0000007c
#define XGMAC_DMA_BUSMODE_DSL_SHIFT	2
#define XGMAC_DMA_BUSMODE_ATDS		0x00000080
#define XGMAC_DMA_BUSMODE_PBL_MASK	0x00003f00
#define XGMAC_DMA_BUSMODE_PBL_SHIFT	8
#define XGMAC_DMA_BUSMODE_FB		0x00010000
#define XGMAC_DMA_BUSMODE_USP		0x00800000
#define XGMAC_DMA_BUSMODE_8PBL		0x01000000
#define XGMAC_DMA_BUSMODE_AAL		0x02000000

#define XGMAC_DMA_AXIMODE_ENLPI		0x80000000
#define XGMAC_DMA_AXIMODE_MGK		0x40000000
#define XGMAC_DMA_AXIMODE_WROSR		0x00100000
#define XGMAC_DMA_AXIMODE_WROSR_MASK	0x00F00000
#define XGMAC_DMA_AXIMODE_WROSR_SHIFT	20
#define XGMAC_DMA_AXIMODE_RDOSR		0x00010000
#define XGMAC_DMA_AXIMODE_RDOSR_MASK	0x000F0000
#define XGMAC_DMA_AXIMODE_RDOSR_SHIFT	16
#define XGMAC_DMA_AXIMODE_AAL		0x00001000
#define XGMAC_DMA_AXIMODE_BLEN256	0x00000080
#define XGMAC_DMA_AXIMODE_BLEN128	0x00000040
#define XGMAC_DMA_AXIMODE_BLEN64	0x00000020
#define XGMAC_DMA_AXIMODE_BLEN32	0x00000010
#define XGMAC_DMA_AXIMODE_BLEN16	0x00000008
#define XGMAC_DMA_AXIMODE_BLEN8		0x00000004
#define XGMAC_DMA_AXIMODE_BLEN4		0x00000002
#define XGMAC_DMA_AXIMODE_UNDEF		0x00000001

#define XGMAC_CORE_OMR_RTC_SHIFT	3
#define XGMAC_CORE_OMR_RTC_MASK		0x00000018
#define XGMAC_CORE_OMR_RTC		0x00000010
#define XGMAC_CORE_OMR_RSF		0x00000020
#define XGMAC_CORE_OMR_DT		0x00000040
#define XGMAC_CORE_OMR_FEF		0x00000080
#define XGMAC_CORE_OMR_EFC		0x00000100
#define XGMAC_CORE_OMR_RFA_SHIFT	9
#define XGMAC_CORE_OMR_RFA_MASK		0x00000E00
#define XGMAC_CORE_OMR_RFD_SHIFT	12
#define XGMAC_CORE_OMR_RFD_MASK		0x00007000
#define XGMAC_CORE_OMR_TTC_SHIFT	16
#define XGMAC_CORE_OMR_TTC_MASK		0x00030000
#define XGMAC_CORE_OMR_TTC		0x00020000
#define XGMAC_CORE_OMR_FTF		0x00100000
#define XGMAC_CORE_OMR_TSF		0x00200000

#define FIFO_MINUS_1K			0x0
#define FIFO_MINUS_2K			0x1
#define FIFO_MINUS_3K			0x2
#define FIFO_MINUS_4K			0x3
#define FIFO_MINUS_6K			0x4
#define FIFO_MINUS_8K			0x5
#define FIFO_MINUS_12K			0x6
#define FIFO_MINUS_16K			0x7

#define XGMAC_CORE_FLOW_PT_SHIFT	16
#define XGMAC_CORE_FLOW_PT_MASK		0xFFFF0000
#define XGMAC_CORE_FLOW_PT		0x00010000
#define XGMAC_CORE_FLOW_DZQP		0x00000080
#define XGMAC_CORE_FLOW_PLT_SHIFT	4
#define XGMAC_CORE_FLOW_PLT_MASK	0x00000030
#define XGMAC_CORE_FLOW_PLT		0x00000010
#define XGMAC_CORE_FLOW_UP		0x00000008
#define XGMAC_CORE_FLOW_RFE		0x00000004
#define XGMAC_CORE_FLOW_TFE		0x00000002
#define XGMAC_CORE_FLOW_FCB		0x00000001

/* XGMAC Descriptor Defines */
#define MAX_DESC_BUF_SZ			(0x2000 - 8)

#define RXDESC_EXT_STATUS		0x00000001
#define RXDESC_CRC_ERR			0x00000002
#define RXDESC_RX_ERR			0x00000008
#define RXDESC_RX_WDOG			0x00000010
#define RXDESC_FRAME_TYPE		0x00000020
#define RXDESC_GIANT_FRAME		0x00000080
#define RXDESC_LAST_SEG			0x00000100
#define RXDESC_FIRST_SEG		0x00000200
#define RXDESC_VLAN_FRAME		0x00000400
#define RXDESC_OVERFLOW_ERR		0x00000800
#define RXDESC_LENGTH_ERR		0x00001000
#define RXDESC_SA_FILTER_FAIL		0x00002000
#define RXDESC_DESCRIPTOR_ERR		0x00004000
#define RXDESC_ERROR_SUMMARY		0x00008000
#define RXDESC_FRAME_LEN_OFFSET		16
#define RXDESC_FRAME_LEN_MASK		0x3fff0000
#define RXDESC_DA_FILTER_FAIL		0x40000000

#define RXDESC1_END_RING		0x00008000

#define RXDESC_IP_PAYLOAD_MASK		0x00000003
#define RXDESC_IP_PAYLOAD_UDP		0x00000001
#define RXDESC_IP_PAYLOAD_TCP		0x00000002
#define RXDESC_IP_PAYLOAD_ICMP		0x00000003
#define RXDESC_IP_HEADER_ERR		0x00000008
#define RXDESC_IP_PAYLOAD_ERR		0x00000010
#define RXDESC_IPV4_PACKET		0x00000040
#define RXDESC_IPV6_PACKET		0x00000080
#define TXDESC_UNDERFLOW_ERR		0x00000001
#define TXDESC_JABBER_TIMEOUT		0x00000002
#define TXDESC_LOCAL_FAULT		0x00000004
#define TXDESC_REMOTE_FAULT		0x00000008
#define TXDESC_VLAN_FRAME		0x00000010
#define TXDESC_FRAME_FLUSHED		0x00000020
#define TXDESC_IP_HEADER_ERR		0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR		0x00000080
#define TXDESC_ERROR_SUMMARY		0x00008000
#define TXDESC_SA_CTRL_INSERT		0x00040000
#define TXDESC_SA_CTRL_REPLACE		0x00080000
#define TXDESC_2ND_ADDR_CHAINED		0x00100000
#define TXDESC_END_RING			0x00200000
#define TXDESC_CSUM_IP			0x00400000
#define TXDESC_CSUM_IP_PAYLD		0x00800000
#define TXDESC_CSUM_ALL			0x00C00000
#define TXDESC_CRC_EN_REPLACE		0x01000000
#define TXDESC_CRC_EN_APPEND		0x02000000
#define TXDESC_DISABLE_PAD		0x04000000
#define TXDESC_FIRST_SEG		0x10000000
#define TXDESC_LAST_SEG			0x20000000
#define TXDESC_INTERRUPT		0x40000000

#define DESC_OWN			0x80000000
#define DESC_BUFFER1_SZ_MASK		0x00001fff
#define DESC_BUFFER2_SZ_MASK		0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET		16

struct xgmac_regs {
	u32 config;
	u32 framefilter;
	u32 resv_1[4];
	u32 flow_control;
	u32 vlantag;
	u32 version;
	u32 vlaninclude;
	u32 resv_2[2];
	u32 pacestretch;
	u32 vlanhash;
	u32 resv_3;
	u32 intreg;
	struct {
		u32 hi;         /* 0x40 */
		u32 lo;         /* 0x44 */
	} macaddr[16];
	u32 resv_4[0xd0];
	u32 core_opmode;	/* 0x400 */
	u32 resv_5[0x2bf];
	u32 busmode;		/* 0xf00 */
	u32 txpoll;
	u32 rxpoll;
	u32 rxdesclist;
	u32 txdesclist;
	u32 dma_status;
	u32 dma_opmode;
	u32 intenable;
	u32 resv_6[2];
	u32 axi_mode;		/* 0xf28 */
};

struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
	__le32 ext_status;
	__le32 res[3];
};

/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = le32_to_cpu(p->buf_size);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	/* Clear all fields and set the owner */
	p->flags = cpu_to_le32(DESC_OWN);
}

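/*
 * Hand a TX descriptor back to the hardware: preserve the end-of-ring
 * marker, apply the caller's flags and set the OWN bit.
 */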
static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

static inline void *desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return (void *)le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     void *paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      void *paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= 4;

	return len;
}

struct calxeda_eth_dev {
	struct xgmac_dma_desc rx_chain[RX_NUM_DESC];
	struct xgmac_dma_desc tx_chain[TX_NUM_DESC];
	char rxbuffer[RX_BUF_SZ];

	u32 tx_currdesc;
	u32 rx_currdesc;

	struct eth_device *dev;
} __aligned(32);

/*
 * Initialize a descriptor ring.  Calxeda XGMAC is configured to use
 * advanced descriptors.
 */

static void init_rx_desc(struct calxeda_eth_dev *priv)
{
	struct xgmac_dma_desc *rxdesc = priv->rx_chain;
	struct xgmac_regs *regs = (struct xgmac_regs *)priv->dev->iobase;
	void *rxbuffer = priv->rxbuffer;
	int i;

	desc_init_rx_desc(rxdesc, RX_NUM_DESC, ETH_BUF_SZ);
	writel((ulong)rxdesc, &regs->rxdesclist);

	for (i = 0; i < RX_NUM_DESC; i++) {
		desc_set_buf_addr(rxdesc + i, rxbuffer + (i * ETH_BUF_SZ),
				  ETH_BUF_SZ);
		desc_set_rx_owner(rxdesc + i);
	}
}

static void init_tx_desc(struct calxeda_eth_dev *priv)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)priv->dev->iobase;

	desc_init_tx_desc(priv->tx_chain, TX_NUM_DESC);
	writel((ulong)priv->tx_chain, &regs->txdesclist);
}

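/*
 * Soft-reset the DMA engine while preserving the configured speed bits in
 * the control register, polling until the reset bit self-clears or the
 * timeout expires.
 */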
static int xgmac_reset(struct eth_device *dev)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	int timeout = MAC_TIMEOUT;
	u32 value;

	value = readl(&regs->config) & XGMAC_CONTROL_SPD_MASK;

	writel(XGMAC_DMA_BUSMODE_RESET, &regs->busmode);
	while ((timeout-- >= 0) &&
		(readl(&regs->busmode) & XGMAC_DMA_BUSMODE_RESET))
		udelay(1);

	writel(value, &regs->config);

	return timeout;
}

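/* Program the station MAC address into MAC address register 0. */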
static void xgmac_hwmacaddr(struct eth_device *dev)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	u32 macaddr[2];

	memcpy(macaddr, dev->enetaddr, 6);
	writel(macaddr[1], &regs->macaddr[0].hi);
	writel(macaddr[0], &regs->macaddr[0].lo);
}

static int xgmac_init(struct eth_device *dev, bd_t *bis)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	struct calxeda_eth_dev *priv = dev->priv;
	int value;

	if (xgmac_reset(dev) < 0)
		return -1;

	/* set the hardware MAC address */
	xgmac_hwmacaddr(dev);

	/* set the AXI bus modes */
	value = XGMAC_DMA_BUSMODE_ATDS |
		(16 << XGMAC_DMA_BUSMODE_PBL_SHIFT) |
		XGMAC_DMA_BUSMODE_FB | XGMAC_DMA_BUSMODE_AAL;
	writel(value, &regs->busmode);

	value = XGMAC_DMA_AXIMODE_AAL | XGMAC_DMA_AXIMODE_BLEN16 |
		XGMAC_DMA_AXIMODE_BLEN8 | XGMAC_DMA_AXIMODE_BLEN4;
	writel(value, &regs->axi_mode);

	/* set flow control parameters and store and forward mode */
	value = (FIFO_MINUS_12K << XGMAC_CORE_OMR_RFD_SHIFT) |
		(FIFO_MINUS_4K << XGMAC_CORE_OMR_RFA_SHIFT) |
		XGMAC_CORE_OMR_EFC | XGMAC_CORE_OMR_TSF;
	writel(value, &regs->core_opmode);

	/* enable pause frames */
	value = (1024 << XGMAC_CORE_FLOW_PT_SHIFT) |
		(1 << XGMAC_CORE_FLOW_PLT_SHIFT) |
		XGMAC_CORE_FLOW_UP | XGMAC_CORE_FLOW_RFE | XGMAC_CORE_FLOW_TFE;
	writel(value, &regs->flow_control);

	/* Initialize the descriptor chains */
	init_rx_desc(priv);
	init_tx_desc(priv);

	/* descriptor indices must be reset to 0 before the DMA is started */
	priv->tx_currdesc = 0;
	priv->rx_currdesc = 0;

	/* set default core values */
	value = readl(&regs->config);
	value &= XGMAC_CONTROL_SPD_MASK;
	value |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_IPC | XGMAC_CONTROL_CAR;

	/* Everything is ready; enable both the MAC and the DMA */
	value |= RXENABLE | TXENABLE;
	writel(value, &regs->config);

	value = readl(&regs->dma_opmode);
	value |= RXSTART | TXSTART;
	writel(value, &regs->dma_opmode);

	return 0;
}

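/*
 * Send one frame: fill the single TX descriptor, issue a TX poll demand
 * and busy-wait until the DMA hands the descriptor back.
 */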
static int xgmac_tx(struct eth_device *dev, void *packet, int length)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	struct calxeda_eth_dev *priv = dev->priv;
	u32 currdesc = priv->tx_currdesc;
	struct xgmac_dma_desc *txdesc = &priv->tx_chain[currdesc];
	int timeout;

	desc_set_buf_addr_and_size(txdesc, packet, length);
	desc_set_tx_owner(txdesc, TXDESC_FIRST_SEG |
		TXDESC_LAST_SEG | TXDESC_CRC_EN_APPEND);

	/* write poll demand */
	writel(1, &regs->txpoll);

	timeout = 1000000;
	while (desc_get_owner(txdesc)) {
		if (timeout-- < 0) {
			printf("xgmac: TX timeout\n");
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	priv->tx_currdesc = (currdesc + 1) & (TX_NUM_DESC - 1);
	return 0;
}

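/*
 * Receive one frame if the host owns the current RX descriptor, hand it
 * to the network stack, then return the descriptor to the hardware.
 */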
static int xgmac_rx(struct eth_device *dev)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	struct calxeda_eth_dev *priv = dev->priv;
	u32 currdesc = priv->rx_currdesc;
	struct xgmac_dma_desc *rxdesc = &priv->rx_chain[currdesc];
	int length = 0;

	/* check if the host has the desc */
	if (desc_get_owner(rxdesc))
		return -1; /* nothing received; the DMA still owns this desc */

	length = desc_get_rx_frame_len(rxdesc);

	net_process_received_packet(desc_get_buf_addr(rxdesc), length);

	/* set descriptor back to owned by XGMAC */
	desc_set_rx_owner(rxdesc);
	writel(1, &regs->rxpoll);

	priv->rx_currdesc = (currdesc + 1) & (RX_NUM_DESC - 1);

	return length;
}

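/* Stop the MAC and the DMA engine and reset the descriptor indices. */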
static void xgmac_halt(struct eth_device *dev)
{
	struct xgmac_regs *regs = (struct xgmac_regs *)dev->iobase;
	struct calxeda_eth_dev *priv = dev->priv;
	int value;

	/* Disable TX/RX */
	value = readl(&regs->config);
	value &= ~(RXENABLE | TXENABLE);
	writel(value, &regs->config);

	/* Disable DMA */
	value = readl(&regs->dma_opmode);
	value &= ~(RXSTART | TXSTART);
	writel(value, &regs->dma_opmode);

	/* descriptor indices must be reset to 0 before the next start-up */
	priv->tx_currdesc = 0;
	priv->rx_currdesc = 0;
}

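/*
 * Probe one XGMAC instance at base_addr and register it with the U-Boot
 * network core.  Returns 1 on success, 0 on allocation failure and -1 if
 * the hardware version register does not match.
 */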
int calxedaxgmac_initialize(u32 id, ulong base_addr)
{
	struct eth_device *dev;
	struct calxeda_eth_dev *priv;
	struct xgmac_regs *regs;
	u32 macaddr[2];

	regs = (struct xgmac_regs *)base_addr;

	/* check hardware version */
	if (readl(&regs->version) != 0x1012)
		return -1;

	dev = malloc(sizeof(*dev));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(*dev));

	/* Structure must be aligned, because it contains the descriptors */
	priv = memalign(32, sizeof(*priv));
	if (!priv) {
		free(dev);
		return 0;
	}

	dev->iobase = (int)base_addr;
	dev->priv = priv;
	priv->dev = dev;
	sprintf(dev->name, "xgmac%d", id);

	/* The MAC address is already configured, so read it from registers. */
	macaddr[1] = readl(&regs->macaddr[0].hi);
	macaddr[0] = readl(&regs->macaddr[0].lo);
	memcpy(dev->enetaddr, macaddr, 6);

	dev->init = xgmac_init;
	dev->send = xgmac_tx;
	dev->recv = xgmac_rx;
	dev->halt = xgmac_halt;

	eth_register(dev);

	return 1;
}