1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * sh_eth.c - Driver for Renesas ethernet controller.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
6*4882a593Smuzhiyun * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
7*4882a593Smuzhiyun * Copyright (C) 2013, 2014 Renesas Electronics Corporation
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * SPDX-License-Identifier: GPL-2.0+
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <config.h>
13*4882a593Smuzhiyun #include <common.h>
14*4882a593Smuzhiyun #include <malloc.h>
15*4882a593Smuzhiyun #include <net.h>
16*4882a593Smuzhiyun #include <netdev.h>
17*4882a593Smuzhiyun #include <miiphy.h>
18*4882a593Smuzhiyun #include <linux/errno.h>
19*4882a593Smuzhiyun #include <asm/io.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include "sh_eth.h"
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #ifndef CONFIG_SH_ETHER_USE_PORT
24*4882a593Smuzhiyun # error "Please define CONFIG_SH_ETHER_USE_PORT"
25*4882a593Smuzhiyun #endif
26*4882a593Smuzhiyun #ifndef CONFIG_SH_ETHER_PHY_ADDR
27*4882a593Smuzhiyun # error "Please define CONFIG_SH_ETHER_PHY_ADDR"
28*4882a593Smuzhiyun #endif
29*4882a593Smuzhiyun
/*
 * Cache maintenance helpers for the DMA descriptor rings and buffers:
 * write back dirty lines before the e-dmac reads memory, invalidate
 * before the CPU reads memory the e-dmac has written.  Both collapse
 * to no-ops when the corresponding cache support is not configured.
 */
#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
/* Round the range out to full cache lines before invalidating */
#define invalidate_cache(addr, len)		\
	{					\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		u32 start, end;			\
						\
		start = (u32)addr;		\
		end = start + len;		\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
						\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

/* Generic poll-loop iteration budget used by reset and transmit waits */
#define TIMEOUT_CNT 1000
55*4882a593Smuzhiyun
sh_eth_send(struct eth_device * dev,void * packet,int len)56*4882a593Smuzhiyun int sh_eth_send(struct eth_device *dev, void *packet, int len)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun struct sh_eth_dev *eth = dev->priv;
59*4882a593Smuzhiyun int port = eth->port, ret = 0, timeout;
60*4882a593Smuzhiyun struct sh_eth_info *port_info = ð->port_info[port];
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun if (!packet || len > 0xffff) {
63*4882a593Smuzhiyun printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
64*4882a593Smuzhiyun ret = -EINVAL;
65*4882a593Smuzhiyun goto err;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* packet must be a 4 byte boundary */
69*4882a593Smuzhiyun if ((int)packet & 3) {
70*4882a593Smuzhiyun printf(SHETHER_NAME ": %s: packet not 4 byte alligned\n"
71*4882a593Smuzhiyun , __func__);
72*4882a593Smuzhiyun ret = -EFAULT;
73*4882a593Smuzhiyun goto err;
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /* Update tx descriptor */
77*4882a593Smuzhiyun flush_cache_wback(packet, len);
78*4882a593Smuzhiyun port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
79*4882a593Smuzhiyun port_info->tx_desc_cur->td1 = len << 16;
80*4882a593Smuzhiyun /* Must preserve the end of descriptor list indication */
81*4882a593Smuzhiyun if (port_info->tx_desc_cur->td0 & TD_TDLE)
82*4882a593Smuzhiyun port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
83*4882a593Smuzhiyun else
84*4882a593Smuzhiyun port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /* Restart the transmitter if disabled */
89*4882a593Smuzhiyun if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
90*4882a593Smuzhiyun sh_eth_write(eth, EDTRR_TRNS, EDTRR);
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun /* Wait until packet is transmitted */
93*4882a593Smuzhiyun timeout = TIMEOUT_CNT;
94*4882a593Smuzhiyun do {
95*4882a593Smuzhiyun invalidate_cache(port_info->tx_desc_cur,
96*4882a593Smuzhiyun sizeof(struct tx_desc_s));
97*4882a593Smuzhiyun udelay(100);
98*4882a593Smuzhiyun } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun if (timeout < 0) {
101*4882a593Smuzhiyun printf(SHETHER_NAME ": transmit timeout\n");
102*4882a593Smuzhiyun ret = -ETIMEDOUT;
103*4882a593Smuzhiyun goto err;
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun port_info->tx_desc_cur++;
107*4882a593Smuzhiyun if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
108*4882a593Smuzhiyun port_info->tx_desc_cur = port_info->tx_desc_base;
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun err:
111*4882a593Smuzhiyun return ret;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun
/*
 * Poll the current rx descriptor for a completed frame.  If one is
 * ready and error free it is handed to the network stack; the
 * descriptor is then recycled and ownership returned to the e-dmac.
 * Returns the length of the received frame, or 0 if none was pending.
 */
int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Re-read the descriptor from memory: the e-dmac updates it by DMA */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	/* RACT clear means the controller has released this descriptor */
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors: RD_RFE flags a bad frame, drop it */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			/* The low 16 bits of rd1 hold the received length */
			len = port_info->rx_desc_cur->rd1 & 0xffff;
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			net_process_received_packet(packet, len);
		}

		/*
		 * Make current descriptor available again, preserving the
		 * end-of-list indication on the last ring entry
		 */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;

		flush_cache_wback(port_info->rx_desc_cur,
				  sizeof(struct rx_desc_s));

		/* Point to the next descriptor, wrapping at the ring end */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}
155*4882a593Smuzhiyun
sh_eth_reset(struct sh_eth_dev * eth)156*4882a593Smuzhiyun static int sh_eth_reset(struct sh_eth_dev *eth)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
159*4882a593Smuzhiyun int ret = 0, i;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun /* Start e-dmac transmitter and receiver */
162*4882a593Smuzhiyun sh_eth_write(eth, EDSR_ENALL, EDSR);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /* Perform a software reset and wait for it to complete */
165*4882a593Smuzhiyun sh_eth_write(eth, EDMR_SRST, EDMR);
166*4882a593Smuzhiyun for (i = 0; i < TIMEOUT_CNT; i++) {
167*4882a593Smuzhiyun if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
168*4882a593Smuzhiyun break;
169*4882a593Smuzhiyun udelay(1000);
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun if (i == TIMEOUT_CNT) {
173*4882a593Smuzhiyun printf(SHETHER_NAME ": Software reset timeout\n");
174*4882a593Smuzhiyun ret = -EIO;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun return ret;
178*4882a593Smuzhiyun #else
179*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
180*4882a593Smuzhiyun udelay(3000);
181*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun return 0;
184*4882a593Smuzhiyun #endif
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
/*
 * Allocate and initialize the tx descriptor ring and point the
 * controller at it.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Write back stale lines; the ring is accessed via P2 from here on */
	flush_cache_wback((u32)port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}
237*4882a593Smuzhiyun
/*
 * Allocate and initialize the rx descriptor ring plus its packet
 * buffers, hand all descriptors to the controller and point it at the
 * ring.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i , ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Write back stale lines; the ring is accessed via P2 from here on */
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors: RACT gives them to the controller */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		/* Upper 16 bits of rd1 hold the buffer size */
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	/* Undo the descriptor allocation so a retry starts clean */
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
310*4882a593Smuzhiyun
sh_eth_tx_desc_free(struct sh_eth_dev * eth)311*4882a593Smuzhiyun static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun int port = eth->port;
314*4882a593Smuzhiyun struct sh_eth_info *port_info = ð->port_info[port];
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun if (port_info->tx_desc_alloc) {
317*4882a593Smuzhiyun free(port_info->tx_desc_alloc);
318*4882a593Smuzhiyun port_info->tx_desc_alloc = NULL;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun
sh_eth_rx_desc_free(struct sh_eth_dev * eth)322*4882a593Smuzhiyun static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun int port = eth->port;
325*4882a593Smuzhiyun struct sh_eth_info *port_info = ð->port_info[port];
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun if (port_info->rx_desc_alloc) {
328*4882a593Smuzhiyun free(port_info->rx_desc_alloc);
329*4882a593Smuzhiyun port_info->rx_desc_alloc = NULL;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun if (port_info->rx_buf_alloc) {
333*4882a593Smuzhiyun free(port_info->rx_buf_alloc);
334*4882a593Smuzhiyun port_info->rx_buf_alloc = NULL;
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
/*
 * Set up both descriptor rings.  On rx failure the already-built tx
 * ring is torn down again so the caller sees all-or-nothing.
 */
static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		return ret;

	ret = sh_eth_rx_desc_init(eth);
	if (ret) {
		sh_eth_tx_desc_free(eth);
		return ret;
	}

	return 0;
}
357*4882a593Smuzhiyun
sh_eth_phy_config(struct sh_eth_dev * eth)358*4882a593Smuzhiyun static int sh_eth_phy_config(struct sh_eth_dev *eth)
359*4882a593Smuzhiyun {
360*4882a593Smuzhiyun int port = eth->port, ret = 0;
361*4882a593Smuzhiyun struct sh_eth_info *port_info = ð->port_info[port];
362*4882a593Smuzhiyun struct eth_device *dev = port_info->dev;
363*4882a593Smuzhiyun struct phy_device *phydev;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun phydev = phy_connect(
366*4882a593Smuzhiyun miiphy_get_dev_by_name(dev->name),
367*4882a593Smuzhiyun port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
368*4882a593Smuzhiyun port_info->phydev = phydev;
369*4882a593Smuzhiyun phy_config(phydev);
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun return ret;
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
/*
 * Program the e-dmac and e-mac registers, set the MAC address, bring
 * up the PHY and then configure speed and duplex to match the
 * negotiated link.  Returns 0 on success or a negative error code.
 */
static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
	int port = eth->port, ret = 0;
	u32 val;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phy;

	/* Configure e-dmac registers */
	sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);

	/* This driver polls, so mask every interrupt source */
	sh_eth_write(eth, 0, EESIPR);
	sh_eth_write(eth, 0, TRSCER);
	sh_eth_write(eth, 0, TFTR);
	sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, 0, RPADIR);
#endif
	sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(eth, 0, ECSIPR);

	/* Set Mac address: high four bytes in MAHR, low two in MALR */
	val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
	    dev->enetaddr[2] << 8 | dev->enetaddr[3];
	sh_eth_write(eth, val, MAHR);

	val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
	sh_eth_write(eth, val, MALR);

	sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, APR_AP, APR);
	sh_eth_write(eth, MPR_MP, MPR);
	sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

	/* SoC-specific MII/RMII interface mode selection */
#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_R8A7790) || defined(CONFIG_R8A7791) || \
	defined(CONFIG_R8A7793) || defined(CONFIG_R8A7794)
	sh_eth_write(eth, sh_eth_read(eth, RMIIMR) | 0x1, RMIIMR);
#endif
	/* Configure phy */
	ret = sh_eth_phy_config(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_cfg;
	}
	phy = port_info->phydev;
	ret = phy_startup(phy);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_R8A7790) || \
		defined(CONFIG_R8A7791) || defined(CONFIG_R8A7793) || \
		defined(CONFIG_R8A7794)
		/* These SoCs select 100 Mbit via the RTM bit in ECMR below */
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(eth, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE|ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE), ECMR);
	}

	return ret;

err_phy_cfg:
	return ret;
}
480*4882a593Smuzhiyun
sh_eth_start(struct sh_eth_dev * eth)481*4882a593Smuzhiyun static void sh_eth_start(struct sh_eth_dev *eth)
482*4882a593Smuzhiyun {
483*4882a593Smuzhiyun /*
484*4882a593Smuzhiyun * Enable the e-dmac receiver only. The transmitter will be enabled when
485*4882a593Smuzhiyun * we have something to transmit
486*4882a593Smuzhiyun */
487*4882a593Smuzhiyun sh_eth_write(eth, EDRRR_R, EDRRR);
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun
/*
 * Stop the e-dmac receiver.
 * NOTE(review): this writes ~EDRRR_R (all bits except R set) instead of
 * a read-modify-write; presumably only the R bit is implemented in
 * EDRRR so the net effect is just clearing R — confirm vs. datasheet.
 */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	sh_eth_write(eth, ~EDRRR_R, EDRRR);
}
494*4882a593Smuzhiyun
/*
 * eth_device .init callback: reset the controller, build the
 * descriptor rings, configure MAC/PHY and start the receiver.
 * On configuration failure both rings are freed again.
 */
int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	ret = sh_eth_config(eth, bd);
	if (ret) {
		sh_eth_tx_desc_free(eth);
		sh_eth_rx_desc_free(eth);
		return ret;
	}

	sh_eth_start(eth);

	return 0;
}
523*4882a593Smuzhiyun
sh_eth_halt(struct eth_device * dev)524*4882a593Smuzhiyun void sh_eth_halt(struct eth_device *dev)
525*4882a593Smuzhiyun {
526*4882a593Smuzhiyun struct sh_eth_dev *eth = dev->priv;
527*4882a593Smuzhiyun sh_eth_stop(eth);
528*4882a593Smuzhiyun }
529*4882a593Smuzhiyun
/*
 * Allocate the driver/device state, register the device with the
 * Ethernet subsystem and register the bit-banged MDIO bus.
 * Returns 0 on success or a negative error code.
 */
int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;
	struct mii_dev *mdiodev;

	/* calloc() gives the zeroed state malloc+memset used to produce */
	eth = calloc(1, sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = calloc(1, sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init;
	dev->halt = sh_eth_halt;
	dev->send = sh_eth_send;
	dev->recv = sh_eth_recv;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register Device to EtherNet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;

	mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	/* strncpy() does not guarantee termination; force a trailing NUL */
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->name[MDIO_NAME_LEN - 1] = '\0';
	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;

	ret = mdio_register(mdiodev);
	if (ret < 0)
		return ret;

	if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	/* free(NULL) is a no-op, so no guards are needed here */
	free(dev);
	free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun /******* for bb_miiphy *******/
/* No hardware setup is required for the bit-banged MDIO bus */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
602*4882a593Smuzhiyun
sh_eth_bb_mdio_active(struct bb_miiphy_bus * bus)603*4882a593Smuzhiyun static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
604*4882a593Smuzhiyun {
605*4882a593Smuzhiyun struct sh_eth_dev *eth = bus->priv;
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);
608*4882a593Smuzhiyun
609*4882a593Smuzhiyun return 0;
610*4882a593Smuzhiyun }
611*4882a593Smuzhiyun
sh_eth_bb_mdio_tristate(struct bb_miiphy_bus * bus)612*4882a593Smuzhiyun static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
613*4882a593Smuzhiyun {
614*4882a593Smuzhiyun struct sh_eth_dev *eth = bus->priv;
615*4882a593Smuzhiyun
616*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);
617*4882a593Smuzhiyun
618*4882a593Smuzhiyun return 0;
619*4882a593Smuzhiyun }
620*4882a593Smuzhiyun
sh_eth_bb_set_mdio(struct bb_miiphy_bus * bus,int v)621*4882a593Smuzhiyun static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
622*4882a593Smuzhiyun {
623*4882a593Smuzhiyun struct sh_eth_dev *eth = bus->priv;
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun if (v)
626*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
627*4882a593Smuzhiyun else
628*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun return 0;
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun
/* Sample the MDIO input: PIR_MDI sits at bit 3, shift down to 0/1 */
static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;

	*v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

	return 0;
}
641*4882a593Smuzhiyun
sh_eth_bb_set_mdc(struct bb_miiphy_bus * bus,int v)642*4882a593Smuzhiyun static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun struct sh_eth_dev *eth = bus->priv;
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun if (v)
647*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
648*4882a593Smuzhiyun else
649*4882a593Smuzhiyun sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);
650*4882a593Smuzhiyun
651*4882a593Smuzhiyun return 0;
652*4882a593Smuzhiyun }
653*4882a593Smuzhiyun
/* Half-period delay between MDC transitions of the bit-banged bus */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
660*4882a593Smuzhiyun
/*
 * The single bit-banged MII bus, wired to the controller's PIR pins.
 * .priv is filled in with the sh_eth_dev pointer by sh_eth_initialize()
 * before the bus is first used.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);
674