xref: /rk3399_rockchip-uboot/drivers/net/macb.c (revision d5555b70e6cdbce4e1395f40c19a504015f93668)
/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The U-Boot networking stack is a little weird.  The networking core
 * allocates receive buffers up front without any regard to the
 * hardware that is supposed to actually receive those packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we still need one buffer from the core in case
 * a packet wraps around the DMA ring and has to be copied into a
 * contiguous buffer.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */
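
/*
 * For example, a board configuration header would contain something
 * like the following (shown purely as an illustration of the define
 * mentioned above; the actual header is board specific):
 *
 *	#define CONFIG_SYS_RX_ETH_BUFFER	1
 */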

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000
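
/*
 * Note on the ring geometry: the 4096-byte RX buffer is carved into
 * 128-byte chunks, so the RX ring has 4096 / 128 = 32 descriptors, one
 * per chunk.  The TX ring has 16 descriptors and each transmitted
 * frame occupies exactly one of them.
 */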

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002
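
/*
 * In the RX descriptor 'addr' word the controller uses the two least
 * significant bits as flags: USED marks a descriptor whose buffer has
 * been filled, WRAP marks the last descriptor of the ring.  The
 * remaining bits carry the (word-aligned) DMA address of the 128-byte
 * receive buffer.
 */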

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

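/*
 * Tell a Gigabit-capable GEM apart from the older 10/100 MACB/EMAC by
 * reading the module identification number from the MID register; GEM
 * designs report an IDNUM of 0x2.
 */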
static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}

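/*
 * MDIO access helpers: enable the management port (MPE in NCR), write
 * a Clause 22 management frame to the MAN register, then poll the IDLE
 * bit in NSR until the shift operation has finished.  RW=1 encodes a
 * register write, RW=2 a register read.
 */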
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

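/*
 * Thin adapters for U-Boot's legacy miiphy/mii command interface.
 * They only serve the PHY address this device was registered with and
 * reject accesses to any other address.
 */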
int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
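
/*
 * The descriptor rings and the RX buffer are kept coherent by hand:
 * dirty cache lines are flushed before the controller is expected to
 * read them, and stale lines are invalidated before the CPU reads what
 * the controller has written.  The helpers below wrap these operations
 * for each ring and for the RX buffer.
 */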
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)

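/*
 * Transmit one frame: map the packet for DMA, fill in the next TX
 * descriptor (the last descriptor in the ring also gets the WRAP
 * flag), flush descriptor and packet to memory, start transmission
 * with TSTART, and then busy-wait for the controller to hand the
 * descriptor back by setting its USED bit.
 */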
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* TODO: check whether paddr and length need to be dcache line aligned */
	flush_dcache_range(paddr, paddr + length);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * Wait for the frame to go out: the networking core may reuse
	 * the transmit buffer as soon as we return.
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* The return value is ignored by the callers anyway */
	return 0;
}

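/*
 * Give receive buffers back to the controller: clear the USED bit in
 * every descriptor between the current rx_tail and new_tail, handling
 * a possible wrap of the ring, then advance rx_tail.
 */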
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

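/*
 * Poll the RX ring starting at next_rx_tail.  A frame may occupy
 * several 128-byte buffers; if it additionally wraps past the end of
 * the ring it is copied into the core's net_rx_packets[0] buffer so
 * the caller sees one contiguous packet, otherwise the packet is
 * handed out in place.  Returns the frame length, or -EAGAIN when no
 * complete frame is pending.
 */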
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}

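/*
 * Kick off autonegotiation: advertise all 10/100 modes, restart the
 * negotiation, and poll BMSR until the PHY reports completion or the
 * timeout expires.
 */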
static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

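/*
 * Bring up the link: locate the PHY (optionally by scanning all 32
 * MDIO addresses), restart autonegotiation if no link is present, and
 * program NCFGR with the negotiated speed and duplex; 1000 Mbit/s is
 * only selected on gigabit-capable GEMs, everything else falls back to
 * the 10/100 path.  Returns non-zero once a usable link is configured.
 */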
static int macb_phy_init(struct macb_device *macb, const char *name)
{
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: consider PHY interface modes other than RGMII */
	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return 0;
	}

	/* First check whether this is a gigabit-capable GEM */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* Fall back to the 10/100 (EMAC) negotiation results */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

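/*
 * The GEM can implement several priority TX queues; DCFG6 reports
 * which ones are present.  This driver only uses queue 0, so every
 * additional queue is parked on a single dummy descriptor that is
 * permanently marked USED and therefore never transmits anything.
 */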
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	macb->dummy_desc->ctrl = TXBUF_USED;
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			MACB_TX_DUMMY_DMA_DESC_SIZE);

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}

static int _macb_init(struct macb_device *macb, const char *name)
{
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Check for additional TX queues and park the unused ones */
		gmac_init_multi_queues(macb);

		/*
		 * On GMAC IP with the GE (gigabit) feature this bit
		 * selects between the RGMII and GMII interfaces; on
		 * GMAC IP without the GE feature it selects between
		 * RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb, name))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

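/*
 * Program the station MAC address: the first four bytes of the address
 * go into the "bottom" register SA1B and the remaining two into the
 * "top" register SA1T, assembled in the byte order shown below.
 */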
static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
{
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
			enetaddr[2] << 16 | enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

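/*
 * Select an MDC clock divider from the peripheral clock rate so that
 * the resulting MDC frequency stays at or below the 2.5 MHz allowed by
 * IEEE 802.3 for the MII management interface.  The GEM variant offers
 * a finer set of dividers for faster peripheral clocks.
 */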
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program.  The width is decoded from the design configuration
 * register, which reports the maximum data bus width the design supports.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

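/*
 * One-time hardware setup: allocate the coherent DMA areas (RX buffer,
 * both descriptor rings and the dummy TX descriptor) and program an
 * initial NCFGR value with the MDC clock divider (plus the DMA bus
 * width on GEM) so that the MDIO bus works before the first full init.
 */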
static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* TODO: check that rx_ring_dma and tx_ring_dma are dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					   &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we can at least talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}

static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_send(macb, netdev->name, packet, length);
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	uchar *packet;
	int length;

	macb->wrapped = false;
	for (;;) {
		macb->next_rx_tail = macb->rx_tail;
		length = _macb_recv(macb, &packet);
		if (length >= 0) {
			net_process_received_packet(packet, length);
			reclaim_rx_buffers(macb, macb->next_rx_tail);
		} else if (length < 0) {
			return length;
		}
	}
}

static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_init(macb, netdev->name);
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_write_hwaddr(macb, netdev->enetaddr);
}

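/*
 * Board glue registers the controller through macb_eth_initialize().
 * A minimal usage sketch (the MACB_BASE_ADDR name and the PHY address
 * below are hypothetical; real board code passes its own values),
 * typically called from the board's board_eth_init():
 *
 *	macb_eth_initialize(0, (void *)MACB_BASE_ADDR, 0x01);
 *
 * after the peripheral clock and the MII/RMII/RGMII pins have been
 * set up.
 */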
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif