xref: /rk3399_rockchip-uboot/drivers/net/cpsw.c (revision 9702ec00e95dbc1fd66ef8e9624c649e1ee818e5)
1 /*
2  * CPSW Ethernet Switch Driver
3  *
4  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <common.h>
17 #include <command.h>
18 #include <net.h>
19 #include <miiphy.h>
20 #include <malloc.h>
22 #include <netdev.h>
23 #include <cpsw.h>
24 #include <asm/errno.h>
25 #include <asm/io.h>
26 #include <phy.h>
27 #include <asm/arch/cpu.h>
28 #include <dm.h>
29 #include <fdt_support.h>
30 
31 DECLARE_GLOBAL_DATA_PTR;
32 
33 #define BITMASK(bits)		(BIT(bits) - 1)
34 #define PHY_REG_MASK		0x1f
35 #define PHY_ID_MASK		0x1f
36 #define NUM_DESCS		(PKTBUFSRX * 2)
37 #define PKT_MIN			60
38 #define PKT_MAX			(1500 + 14 + 4 + 4)	/* MTU + Ethernet header + VLAN tag + FCS */
39 #define CLEAR_BIT		1
40 #define GIGABITEN		BIT(7)
41 #define FULLDUPLEXEN		BIT(0)
42 #define MIIEN			BIT(15)
43 
44 /* reg offset */
45 #define CPSW_HOST_PORT_OFFSET	0x108
46 #define CPSW_SLAVE0_OFFSET	0x208
47 #define CPSW_SLAVE1_OFFSET	0x308
48 #define CPSW_SLAVE_SIZE		0x100
49 #define CPSW_CPDMA_OFFSET	0x800
50 #define CPSW_HW_STATS		0x900
51 #define CPSW_STATERAM_OFFSET	0xa00
52 #define CPSW_CPTS_OFFSET	0xc00
53 #define CPSW_ALE_OFFSET		0xd00
54 #define CPSW_SLIVER0_OFFSET	0xd80
55 #define CPSW_SLIVER1_OFFSET	0xdc0
56 #define CPSW_BD_OFFSET		0x2000
57 #define CPSW_MDIO_DIV		0xff
58 
59 #define AM335X_GMII_SEL_OFFSET	0x630
60 
61 /* DMA Registers */
62 #define CPDMA_TXCONTROL		0x004
63 #define CPDMA_RXCONTROL		0x014
64 #define CPDMA_SOFTRESET		0x01c
65 #define CPDMA_RXFREE		0x0e0
66 #define CPDMA_TXHDP_VER1	0x100
67 #define CPDMA_TXHDP_VER2	0x200
68 #define CPDMA_RXHDP_VER1	0x120
69 #define CPDMA_RXHDP_VER2	0x220
70 #define CPDMA_TXCP_VER1		0x140
71 #define CPDMA_TXCP_VER2		0x240
72 #define CPDMA_RXCP_VER1		0x160
73 #define CPDMA_RXCP_VER2		0x260
74 
75 /* Descriptor mode bits */
76 #define CPDMA_DESC_SOP		BIT(31)
77 #define CPDMA_DESC_EOP		BIT(30)
78 #define CPDMA_DESC_OWNER	BIT(29)
79 #define CPDMA_DESC_EOQ		BIT(28)
80 
81 /*
82  * This timeout is a worst-case, purely defensive measure against
83  * unexpected controller lockups.  Ideally, we should never hit this
84  * scenario in practice.
85  */
86 #define MDIO_TIMEOUT            100 /* msecs */
87 #define CPDMA_TIMEOUT		100 /* msecs */
88 
89 struct cpsw_mdio_regs {
90 	u32	version;
91 	u32	control;
92 #define CONTROL_IDLE		BIT(31)
93 #define CONTROL_ENABLE		BIT(30)
94 
95 	u32	alive;
96 	u32	link;
97 	u32	linkintraw;
98 	u32	linkintmasked;
99 	u32	__reserved_0[2];
100 	u32	userintraw;
101 	u32	userintmasked;
102 	u32	userintmaskset;
103 	u32	userintmaskclr;
104 	u32	__reserved_1[20];
105 
106 	struct {
107 		u32		access;
108 		u32		physel;
109 #define USERACCESS_GO		BIT(31)
110 #define USERACCESS_WRITE	BIT(30)
111 #define USERACCESS_ACK		BIT(29)
112 #define USERACCESS_READ		(0)
113 #define USERACCESS_DATA		(0xffff)
114 	} user[0];
115 };
116 
117 struct cpsw_regs {
118 	u32	id_ver;
119 	u32	control;
120 	u32	soft_reset;
121 	u32	stat_port_en;
122 	u32	ptype;
123 };
124 
125 struct cpsw_slave_regs {
126 	u32	max_blks;
127 	u32	blk_cnt;
128 	u32	flow_thresh;
129 	u32	port_vlan;
130 	u32	tx_pri_map;
131 #ifdef CONFIG_AM33XX
132 	u32	gap_thresh;
133 #elif defined(CONFIG_TI814X)
134 	u32	ts_ctl;
135 	u32	ts_seq_ltype;
136 	u32	ts_vlan;
137 #endif
138 	u32	sa_lo;
139 	u32	sa_hi;
140 };
141 
142 struct cpsw_host_regs {
143 	u32	max_blks;
144 	u32	blk_cnt;
145 	u32	flow_thresh;
146 	u32	port_vlan;
147 	u32	tx_pri_map;
148 	u32	cpdma_tx_pri_map;
149 	u32	cpdma_rx_chan_map;
150 };
151 
152 struct cpsw_sliver_regs {
153 	u32	id_ver;
154 	u32	mac_control;
155 	u32	mac_status;
156 	u32	soft_reset;
157 	u32	rx_maxlen;
158 	u32	__reserved_0;
159 	u32	rx_pause;
160 	u32	tx_pause;
161 	u32	__reserved_1;
162 	u32	rx_pri_map;
163 };
164 
165 #define ALE_ENTRY_BITS		68
166 #define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
167 
168 /* ALE Registers */
169 #define ALE_CONTROL		0x08
170 #define ALE_UNKNOWNVLAN		0x18
171 #define ALE_TABLE_CONTROL	0x20
172 #define ALE_TABLE		0x34
173 #define ALE_PORTCTL		0x40
174 
175 #define ALE_TABLE_WRITE		BIT(31)
176 
177 #define ALE_TYPE_FREE			0
178 #define ALE_TYPE_ADDR			1
179 #define ALE_TYPE_VLAN			2
180 #define ALE_TYPE_VLAN_ADDR		3
181 
182 #define ALE_UCAST_PERSISTANT		0
183 #define ALE_UCAST_UNTOUCHED		1
184 #define ALE_UCAST_OUI			2
185 #define ALE_UCAST_TOUCHED		3
186 
187 #define ALE_MCAST_FWD			0
188 #define ALE_MCAST_BLOCK_LEARN_FWD	1
189 #define ALE_MCAST_FWD_LEARN		2
190 #define ALE_MCAST_FWD_2			3
191 
192 enum cpsw_ale_port_state {
193 	ALE_PORT_STATE_DISABLE	= 0x00,
194 	ALE_PORT_STATE_BLOCK	= 0x01,
195 	ALE_PORT_STATE_LEARN	= 0x02,
196 	ALE_PORT_STATE_FORWARD	= 0x03,
197 };
198 
199 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
200 #define ALE_SECURE	1
201 #define ALE_BLOCKED	2
202 
203 struct cpsw_slave {
204 	struct cpsw_slave_regs		*regs;
205 	struct cpsw_sliver_regs		*sliver;
206 	int				slave_num;
207 	u32				mac_control;
208 	struct cpsw_slave_data		*data;
209 };
210 
211 struct cpdma_desc {
212 	/* hardware fields */
213 	u32			hw_next;
214 	u32			hw_buffer;
215 	u32			hw_len;
216 	u32			hw_mode;
217 	/* software fields */
218 	u32			sw_buffer;
219 	u32			sw_len;
220 };
221 
222 struct cpdma_chan {
223 	struct cpdma_desc	*head, *tail;
224 	void			*hdp, *cp, *rxfree;
225 };
226 
227 #define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
228 #define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
229 #define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))
230 
231 #define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
232 #define chan_read(chan, fld)		__raw_readl((chan)->fld)
233 #define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))
234 
235 #define for_active_slave(slave, priv) \
236 	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
237 #define for_each_slave(slave, priv) \
238 	for (slave = (priv)->slaves; slave != (priv)->slaves + \
239 				(priv)->data.slaves; slave++)
240 
241 struct cpsw_priv {
242 #ifdef CONFIG_DM_ETH
243 	struct udevice			*dev;
244 #else
245 	struct eth_device		*dev;
246 #endif
247 	struct cpsw_platform_data	data;
248 	int				host_port;
249 
250 	struct cpsw_regs		*regs;
251 	void				*dma_regs;
252 	struct cpsw_host_regs		*host_port_regs;
253 	void				*ale_regs;
254 
255 	struct cpdma_desc		*descs;
256 	struct cpdma_desc		*desc_free;
257 	struct cpdma_chan		rx_chan, tx_chan;
258 
259 	struct cpsw_slave		*slaves;
260 	struct phy_device		*phydev;
261 	struct mii_dev			*bus;
262 
263 	u32				phy_mask;
264 };
265 
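/*
 * ALE table entries are 68 bits wide and are accessed as three 32-bit
 * words.  The words are read and written most-significant-first, which
 * is why the word index is "flipped" below.
 */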
266 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
267 {
268 	int idx;
269 
270 	idx    = start / 32;
271 	start -= idx * 32;
272 	idx    = 2 - idx; /* flip */
273 	return (ale_entry[idx] >> start) & BITMASK(bits);
274 }
275 
276 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
277 				      u32 value)
278 {
279 	int idx;
280 
281 	value &= BITMASK(bits);
282 	idx    = start / 32;
283 	start -= idx * 32;
284 	idx    = 2 - idx; /* flip */
285 	ale_entry[idx] &= ~(BITMASK(bits) << start);
286 	ale_entry[idx] |=  (value << start);
287 }
288 
289 #define DEFINE_ALE_FIELD(name, start, bits)				\
290 static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
291 {									\
292 	return cpsw_ale_get_field(ale_entry, start, bits);		\
293 }									\
294 static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
295 {									\
296 	cpsw_ale_set_field(ale_entry, start, bits, value);		\
297 }
298 
299 DEFINE_ALE_FIELD(entry_type,		60,	2)
300 DEFINE_ALE_FIELD(mcast_state,		62,	2)
301 DEFINE_ALE_FIELD(port_mask,		66,	3)
302 DEFINE_ALE_FIELD(ucast_type,		62,	2)
303 DEFINE_ALE_FIELD(port_num,		66,	2)
304 DEFINE_ALE_FIELD(blocked,		65,	1)
305 DEFINE_ALE_FIELD(secure,		64,	1)
306 DEFINE_ALE_FIELD(mcast,			40,	1)
307 
308 /* The MAC address field in the ALE entry cannot be macroized as above */
309 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
310 {
311 	int i;
312 
313 	for (i = 0; i < 6; i++)
314 		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
315 }
316 
317 static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
318 {
319 	int i;
320 
321 	for (i = 0; i < 6; i++)
322 		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
323 }
324 
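/*
 * Table access protocol: for a read, write the entry index to
 * ALE_TABLE_CONTROL and then read back the three ALE_TABLE words; for a
 * write, fill the ALE_TABLE words first and commit them by writing the
 * index with ALE_TABLE_WRITE set.
 */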
325 static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
326 {
327 	int i;
328 
329 	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
330 
331 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
332 		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
333 
334 	return idx;
335 }
336 
337 static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
338 {
339 	int i;
340 
341 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
342 		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
343 
344 	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
345 
346 	return idx;
347 }
348 
349 static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
350 {
351 	u32 ale_entry[ALE_ENTRY_WORDS];
352 	int type, idx;
353 
354 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
355 		u8 entry_addr[6];
356 
357 		cpsw_ale_read(priv, idx, ale_entry);
358 		type = cpsw_ale_get_entry_type(ale_entry);
359 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
360 			continue;
361 		cpsw_ale_get_addr(ale_entry, entry_addr);
362 		if (memcmp(entry_addr, addr, 6) == 0)
363 			return idx;
364 	}
365 	return -ENOENT;
366 }
367 
368 static int cpsw_ale_match_free(struct cpsw_priv *priv)
369 {
370 	u32 ale_entry[ALE_ENTRY_WORDS];
371 	int type, idx;
372 
373 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
374 		cpsw_ale_read(priv, idx, ale_entry);
375 		type = cpsw_ale_get_entry_type(ale_entry);
376 		if (type == ALE_TYPE_FREE)
377 			return idx;
378 	}
379 	return -ENOENT;
380 }
381 
382 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
383 {
384 	u32 ale_entry[ALE_ENTRY_WORDS];
385 	int type, idx;
386 
387 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
388 		cpsw_ale_read(priv, idx, ale_entry);
389 		type = cpsw_ale_get_entry_type(ale_entry);
390 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
391 			continue;
392 		if (cpsw_ale_get_mcast(ale_entry))
393 			continue;
394 		type = cpsw_ale_get_ucast_type(ale_entry);
395 		if (type != ALE_UCAST_PERSISTANT &&
396 		    type != ALE_UCAST_OUI)
397 			return idx;
398 	}
399 	return -ENOENT;
400 }
401 
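/*
 * Add (or update) a unicast entry for the given port.  If the address
 * is already in the table its entry is reused; otherwise a free slot is
 * used, falling back to recycling an ageable (non-persistent, non-OUI)
 * unicast entry when the table is full.
 */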
402 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
403 			      int port, int flags)
404 {
405 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
406 	int idx;
407 
408 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
409 	cpsw_ale_set_addr(ale_entry, addr);
410 	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
411 	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
412 	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
413 	cpsw_ale_set_port_num(ale_entry, port);
414 
415 	idx = cpsw_ale_match_addr(priv, addr);
416 	if (idx < 0)
417 		idx = cpsw_ale_match_free(priv);
418 	if (idx < 0)
419 		idx = cpsw_ale_find_ageable(priv);
420 	if (idx < 0)
421 		return -ENOMEM;
422 
423 	cpsw_ale_write(priv, idx, ale_entry);
424 	return 0;
425 }
426 
427 static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
428 			      int port_mask)
429 {
430 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
431 	int idx, mask;
432 
433 	idx = cpsw_ale_match_addr(priv, addr);
434 	if (idx >= 0)
435 		cpsw_ale_read(priv, idx, ale_entry);
436 
437 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
438 	cpsw_ale_set_addr(ale_entry, addr);
439 	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
440 
441 	mask = cpsw_ale_get_port_mask(ale_entry);
442 	port_mask |= mask;
443 	cpsw_ale_set_port_mask(ale_entry, port_mask);
444 
445 	if (idx < 0)
446 		idx = cpsw_ale_match_free(priv);
447 	if (idx < 0)
448 		idx = cpsw_ale_find_ageable(priv);
449 	if (idx < 0)
450 		return -ENOMEM;
451 
452 	cpsw_ale_write(priv, idx, ale_entry);
453 	return 0;
454 }
455 
456 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
457 {
458 	u32 tmp, mask = BIT(bit);
459 
460 	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
461 	tmp &= ~mask;
462 	tmp |= val ? mask : 0;
463 	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
464 }
465 
466 #define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
467 #define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
468 #define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv,  2, val)
469 
470 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
471 				       int val)
472 {
473 	int offset = ALE_PORTCTL + 4 * port;
474 	u32 tmp, mask = 0x3;
475 
476 	tmp  = __raw_readl(priv->ale_regs + offset);
477 	tmp &= ~mask;
478 	tmp |= val & mask;
479 	__raw_writel(tmp, priv->ale_regs + offset);
480 }
481 
482 static struct cpsw_mdio_regs *mdio_regs;
483 
484 /* wait until hardware is ready for another user access */
485 static inline u32 wait_for_user_access(void)
486 {
487 	u32 reg = 0;
488 	int timeout = MDIO_TIMEOUT;
489 
490 	while (timeout-- &&
491 	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
492 		udelay(10);
493 
494 	if (timeout == -1) {
495 		printf("wait_for_user_access Timeout\n");
496 		return -ETIMEDOUT;
497 	}
498 	return reg;
499 }
500 
501 /* wait until hardware state machine is idle */
502 static inline void wait_for_idle(void)
503 {
504 	int timeout = MDIO_TIMEOUT;
505 
506 	while (timeout-- &&
507 		((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
508 		udelay(10);
509 
510 	if (timeout == -1)
511 		printf("wait_for_idle Timeout\n");
512 }
513 
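/*
 * MDIO transactions go through the user access register: set GO along
 * with the register and PHY addresses (plus the data for a write), then
 * poll until GO clears.  On reads, ACK indicates that the PHY responded
 * and the low 16 bits hold the data.
 */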
514 static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
515 				int dev_addr, int phy_reg)
516 {
517 	int data;
518 	u32 reg;
519 
520 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
521 		return -EINVAL;
522 
523 	wait_for_user_access();
524 	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
525 	       (phy_id << 16));
526 	__raw_writel(reg, &mdio_regs->user[0].access);
527 	reg = wait_for_user_access();
528 
529 	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
530 	return data;
531 }
532 
533 static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
534 				int phy_reg, u16 data)
535 {
536 	u32 reg;
537 
538 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
539 		return -EINVAL;
540 
541 	wait_for_user_access();
542 	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
543 		   (phy_id << 16) | (data & USERACCESS_DATA));
544 	__raw_writel(reg, &mdio_regs->user[0].access);
545 	wait_for_user_access();
546 
547 	return 0;
548 }
549 
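/*
 * Enable the MDIO state machine with the requested clock divider and
 * register an mii_dev whose read/write ops drive the controller above.
 */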
550 static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
551 {
552 	struct mii_dev *bus = mdio_alloc();
553 
554 	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
555 
556 	/* set enable and clock divider */
557 	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
558 
559 	/*
560 	 * wait for scan logic to settle:
561 	 * the scan time consists of (a) a large fixed component, and (b) a
562 	 * small component that varies with the mii bus frequency.  These
563 	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
564 	 * silicon.  Since the effect of (b) was found to be largely
565 	 * negligible, we keep things simple here.
566 	 */
567 	udelay(1000);
568 
569 	bus->read = cpsw_mdio_read;
570 	bus->write = cpsw_mdio_write;
571 	strcpy(bus->name, name);
572 
573 	mdio_register(bus);
574 }
575 
576 /* Set a self-clearing bit in a register, and wait for it to clear */
577 static inline void setbit_and_wait_for_clear32(void *addr)
578 {
579 	__raw_writel(CLEAR_BIT, addr);
580 	while (__raw_readl(addr) & CLEAR_BIT)
581 		;
582 }
583 
584 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
585 			 ((mac)[2] << 16) | ((mac)[3] << 24))
586 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
587 
588 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
589 			       struct cpsw_priv *priv)
590 {
591 #ifdef CONFIG_DM_ETH
592 	struct eth_pdata *pdata = dev_get_platdata(priv->dev);
593 
594 	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
595 	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
596 #else
597 	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
598 	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
599 #endif
600 }
601 
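/*
 * Translate the PHY's negotiated link state into a sliver mac_control
 * value: GIGABITEN for 1000 Mbit/s, MIIEN for 100 Mbit/s, FULLDUPLEXEN
 * for full duplex; zero means link down.  The register is only written
 * when the value actually changes.
 */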
602 static void cpsw_slave_update_link(struct cpsw_slave *slave,
603 				   struct cpsw_priv *priv, int *link)
604 {
605 	struct phy_device *phy;
606 	u32 mac_control = 0;
607 
608 	phy = priv->phydev;
609 
610 	if (!phy)
611 		return;
612 
613 	phy_startup(phy);
614 	*link = phy->link;
615 
616 	if (*link) { /* link up */
617 		mac_control = priv->data.mac_control;
618 		if (phy->speed == 1000)
619 			mac_control |= GIGABITEN;
620 		if (phy->duplex == DUPLEX_FULL)
621 			mac_control |= FULLDUPLEXEN;
622 		if (phy->speed == 100)
623 			mac_control |= MIIEN;
624 	}
625 
626 	if (mac_control == slave->mac_control)
627 		return;
628 
629 	if (mac_control) {
630 		printf("link up on port %d, speed %d, %s duplex\n",
631 				slave->slave_num, phy->speed,
632 				(phy->duplex == DUPLEX_FULL) ? "full" : "half");
633 	} else {
634 		printf("link down on port %d\n", slave->slave_num);
635 	}
636 
637 	__raw_writel(mac_control, &slave->sliver->mac_control);
638 	slave->mac_control = mac_control;
639 }
640 
641 static int cpsw_update_link(struct cpsw_priv *priv)
642 {
643 	int link = 0;
644 	struct cpsw_slave *slave;
645 
646 	for_active_slave(slave, priv)
647 		cpsw_slave_update_link(slave, priv, &link);
648 
649 	return link;
650 }
651 
652 static inline u32  cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
653 {
654 	if (priv->host_port == 0)
655 		return slave_num + 1;
656 	else
657 		return slave_num;
658 }
659 
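/*
 * Per-slave bring-up: soft-reset the sliver, program the priority maps
 * and maximum rx frame length, set the port MAC address, put the port
 * into forwarding state in the ALE and add a broadcast entry for it.
 */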
660 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
661 {
662 	u32     slave_port;
663 
664 	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
665 
666 	/* setup priority mapping */
667 	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
668 	__raw_writel(0x33221100, &slave->regs->tx_pri_map);
669 
670 	/* setup max packet size, and mac address */
671 	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
672 	cpsw_set_slave_mac(slave, priv);
673 
674 	slave->mac_control = 0;	/* no link yet */
675 
676 	/* enable forwarding */
677 	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
678 	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
679 
680 	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);
681 
682 	priv->phy_mask |= 1 << slave->data->phy_addr;
683 }
684 
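/*
 * Free descriptors are kept on a singly linked list threaded through
 * the hw_next field; the descriptor pool itself lives in the
 * controller's buffer descriptor RAM (see bd_ram_ofs).
 */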
685 static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
686 {
687 	struct cpdma_desc *desc = priv->desc_free;
688 
689 	if (desc)
690 		priv->desc_free = desc_read_ptr(desc, hw_next);
691 	return desc;
692 }
693 
694 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
695 {
696 	if (desc) {
697 		desc_write(desc, hw_next, priv->desc_free);
698 		priv->desc_free = desc;
699 	}
700 }
701 
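/*
 * Queue a buffer on a DMA channel.  The descriptor is marked
 * SOP | EOP | OWNER and appended to the software list; the head
 * descriptor pointer is written for the first packet, or when the
 * previous descriptor already hit end-of-queue, and rx channels also
 * write the RXFREE register.
 */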
702 static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
703 			void *buffer, int len)
704 {
705 	struct cpdma_desc *desc, *prev;
706 	u32 mode;
707 
708 	desc = cpdma_desc_alloc(priv);
709 	if (!desc)
710 		return -ENOMEM;
711 
712 	if (len < PKT_MIN)
713 		len = PKT_MIN;
714 
715 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
716 
717 	desc_write(desc, hw_next,   0);
718 	desc_write(desc, hw_buffer, buffer);
719 	desc_write(desc, hw_len,    len);
720 	desc_write(desc, hw_mode,   mode | len);
721 	desc_write(desc, sw_buffer, buffer);
722 	desc_write(desc, sw_len,    len);
723 
724 	if (!chan->head) {
725 		/* simple case - first packet enqueued */
726 		chan->head = desc;
727 		chan->tail = desc;
728 		chan_write(chan, hdp, desc);
729 		goto done;
730 	}
731 
732 	/* not the first packet - enqueue at the tail */
733 	prev = chan->tail;
734 	desc_write(prev, hw_next, desc);
735 	chan->tail = desc;
736 
737 	/* next check if EOQ has been triggered already */
738 	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
739 		chan_write(chan, hdp, desc);
740 
741 done:
742 	if (chan->rxfree)
743 		chan_write(chan, rxfree, 1);
744 	return 0;
745 }
746 
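/*
 * Reap one descriptor from a channel.  Returns -ENOENT when the queue
 * is empty and -EBUSY while the head descriptor is still owned by the
 * hardware (restarting a stalled channel if needed); otherwise the
 * buffer and length are returned, completion is acknowledged through
 * the CP register and the descriptor goes back to the free list.
 */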
747 static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
748 			 void **buffer, int *len)
749 {
750 	struct cpdma_desc *desc = chan->head;
751 	u32 status;
752 
753 	if (!desc)
754 		return -ENOENT;
755 
756 	status = desc_read(desc, hw_mode);
757 
758 	if (len)
759 		*len = status & 0x7ff;
760 
761 	if (buffer)
762 		*buffer = desc_read_ptr(desc, sw_buffer);
763 
764 	if (status & CPDMA_DESC_OWNER) {
765 		if (chan_read(chan, hdp) == 0) {
766 			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
767 				chan_write(chan, hdp, desc);
768 		}
769 
770 		return -EBUSY;
771 	}
772 
773 	chan->head = desc_read_ptr(desc, hw_next);
774 	chan_write(chan, cp, desc);
775 
776 	cpdma_desc_free(priv, desc);
777 	return 0;
778 }
779 
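/*
 * Bring the switch up: reset the controller, program the ALE and host
 * port, initialize the active slave, build the descriptor free list,
 * point the rx/tx channels at the version-specific CPDMA registers,
 * enable DMA and pre-queue the receive buffers.
 */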
780 static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
781 {
782 	struct cpsw_slave	*slave;
783 	int i, ret;
784 
785 	/* soft reset the controller and initialize priv */
786 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
787 
788 	/* initialize and reset the address lookup engine */
789 	cpsw_ale_enable(priv, 1);
790 	cpsw_ale_clear(priv, 1);
791 	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
792 
793 	/* setup host port priority mapping */
794 	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
795 	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
796 
797 	/* disable priority elevation */
798 	__raw_writel(0, &priv->regs->ptype);
799 
800 	/* enable statistics collection on all ports */
801 	__raw_writel(0x7, &priv->regs->stat_port_en);
803 
804 	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
805 
806 	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
807 	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
808 
809 	for_active_slave(slave, priv)
810 		cpsw_slave_init(slave, priv);
811 
812 	cpsw_update_link(priv);
813 
814 	/* init descriptor pool */
815 	for (i = 0; i < NUM_DESCS; i++) {
816 		desc_write(&priv->descs[i], hw_next,
817 			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
818 	}
819 	priv->desc_free = &priv->descs[0];
820 
821 	/* initialize channels */
822 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
823 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
824 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER2;
825 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER2;
826 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
827 
828 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
829 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER2;
830 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER2;
831 	} else {
832 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
833 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER1;
834 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER1;
835 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
836 
837 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
838 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER1;
839 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER1;
840 	}
841 
842 	/* clear dma state */
843 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
844 
845 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
846 		for (i = 0; i < priv->data.channels; i++) {
847 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
848 					* i);
849 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
850 					* i);
851 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
852 					* i);
853 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
854 					* i);
855 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
856 					* i);
857 		}
858 	} else {
859 		for (i = 0; i < priv->data.channels; i++) {
860 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
861 					* i);
862 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
863 					* i);
864 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
865 					* i);
866 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
867 					* i);
868 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
869 					* i);
871 		}
872 	}
873 
874 	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
875 	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
876 
877 	/* submit rx descs */
878 	for (i = 0; i < PKTBUFSRX; i++) {
879 		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
880 				   PKTSIZE);
881 		if (ret < 0) {
882 			printf("error %d submitting rx desc\n", ret);
883 			break;
884 		}
885 	}
886 
887 	return 0;
888 }
889 
890 static void _cpsw_halt(struct cpsw_priv *priv)
891 {
892 	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
893 	writel(0, priv->dma_regs + CPDMA_RXCONTROL);
894 
895 	/* soft reset the controller */
896 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
897 
898 	/* clear dma state */
899 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
900 
902 
903 static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
904 {
905 	void *buffer;
906 	int len;
907 	int timeout = CPDMA_TIMEOUT;
908 
909 	flush_dcache_range((unsigned long)packet,
910 			   (unsigned long)packet + length);
911 
912 	/* first reap completed packets */
913 	while (timeout-- &&
914 		(cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
915 		;
916 
917 	if (timeout == -1) {
918 		printf("cpdma_process timeout\n");
919 		return -ETIMEDOUT;
920 	}
921 
922 	return cpdma_submit(priv, &priv->tx_chan, packet, length);
923 }
924 
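/*
 * Receive path: pop one completed rx descriptor, invalidate the cache
 * over its buffer and hand the buffer to the caller, who is expected to
 * resubmit it (see cpsw_recv() and cpsw_eth_free_pkt()).
 */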
925 static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
926 {
927 	void *buffer;
928 	int len;
929 	int ret;
930 
931 	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
932 	if (ret < 0)
933 		return ret;
934 
935 	invalidate_dcache_range((unsigned long)buffer,
936 				(unsigned long)buffer + PKTSIZE_ALIGN);
937 	*pkt = buffer;
938 
939 	return len;
940 }
941 
942 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
943 			    struct cpsw_priv *priv)
944 {
945 	void			*regs = priv->regs;
946 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
947 	slave->slave_num = slave_num;
948 	slave->data	= data;
949 	slave->regs	= regs + data->slave_reg_ofs;
950 	slave->sliver	= regs + data->sliver_reg_ofs;
951 }
952 
953 static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
954 {
955 	struct phy_device *phydev;
956 	u32 supported = PHY_GBIT_FEATURES;
957 
958 	phydev = phy_connect(priv->bus,
959 			slave->data->phy_addr,
960 			priv->dev,
961 			slave->data->phy_if);
962 
963 	if (!phydev)
964 		return -1;
965 
966 	phydev->supported &= supported;
967 	phydev->advertising = phydev->supported;
968 
969 #ifdef CONFIG_DM_ETH
970 	if (slave->data->phy_of_handle)
971 		phydev->dev->of_offset = slave->data->phy_of_handle;
972 #endif
973 
974 	priv->phydev = phydev;
975 	phy_config(phydev);
976 
977 	return 1;
978 }
979 
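/*
 * Common (DM and non-DM) setup: carve the slave, host port, CPDMA, ALE
 * and descriptor regions out of the register window described by the
 * platform data, register the MDIO bus and attach a PHY to the active
 * slave.
 */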
980 int _cpsw_register(struct cpsw_priv *priv)
981 {
982 	struct cpsw_slave	*slave;
983 	struct cpsw_platform_data *data = &priv->data;
984 	void			*regs = (void *)data->cpsw_base;
985 
986 	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
987 	if (!priv->slaves)
988 		return -ENOMEM;
990 
991 	priv->host_port		= data->host_port_num;
992 	priv->regs		= regs;
993 	priv->host_port_regs	= regs + data->host_port_reg_ofs;
994 	priv->dma_regs		= regs + data->cpdma_reg_ofs;
995 	priv->ale_regs		= regs + data->ale_reg_ofs;
996 	priv->descs		= (void *)regs + data->bd_ram_ofs;
997 
998 	int idx = 0;
999 
1000 	for_each_slave(slave, priv) {
1001 		cpsw_slave_setup(slave, idx, priv);
1002 		idx = idx + 1;
1003 	}
1004 
1005 	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
1006 	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
1007 	for_active_slave(slave, priv)
1008 		cpsw_phy_init(priv, slave);
1009 
1010 	return 0;
1011 }
1012 
1013 #ifndef CONFIG_DM_ETH
1014 static int cpsw_init(struct eth_device *dev, bd_t *bis)
1015 {
1016 	struct cpsw_priv	*priv = dev->priv;
1017 
1018 	return _cpsw_init(priv, dev->enetaddr);
1019 }
1020 
1021 static void cpsw_halt(struct eth_device *dev)
1022 {
1023 	struct cpsw_priv *priv = dev->priv;
1024 
1025 	return _cpsw_halt(priv);
1026 }
1027 
1028 static int cpsw_send(struct eth_device *dev, void *packet, int length)
1029 {
1030 	struct cpsw_priv	*priv = dev->priv;
1031 
1032 	return _cpsw_send(priv, packet, length);
1033 }
1034 
1035 static int cpsw_recv(struct eth_device *dev)
1036 {
1037 	struct cpsw_priv *priv = dev->priv;
1038 	uchar *pkt = NULL;
1039 	int len;
1040 
1041 	len = _cpsw_recv(priv, &pkt);
1042 
1043 	if (len > 0) {
1044 		net_process_received_packet(pkt, len);
1045 		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
1046 	}
1047 
1048 	return len;
1049 }
1050 
1051 int cpsw_register(struct cpsw_platform_data *data)
1052 {
1053 	struct cpsw_priv	*priv;
1054 	struct eth_device	*dev;
1055 	int ret;
1056 
1057 	dev = calloc(1, sizeof(*dev));
1058 	if (!dev)
1059 		return -ENOMEM;
1060 
1061 	priv = calloc(1, sizeof(*priv));
1062 	if (!priv) {
1063 		free(dev);
1064 		return -ENOMEM;
1065 	}
1066 
1067 	priv->dev = dev;
1068 	priv->data = *data;
1069 
1070 	strcpy(dev->name, "cpsw");
1071 	dev->iobase	= 0;
1072 	dev->init	= cpsw_init;
1073 	dev->halt	= cpsw_halt;
1074 	dev->send	= cpsw_send;
1075 	dev->recv	= cpsw_recv;
1076 	dev->priv	= priv;
1077 
1078 	eth_register(dev);
1079 
1080 	ret = _cpsw_register(priv);
1081 	if (ret < 0) {
1082 		eth_unregister(dev);
1083 		free(dev);
1084 		free(priv);
1085 		return ret;
1086 	}
1087 
1088 	return 1;
1089 }
1090 #else
1091 static int cpsw_eth_start(struct udevice *dev)
1092 {
1093 	struct eth_pdata *pdata = dev_get_platdata(dev);
1094 	struct cpsw_priv *priv = dev_get_priv(dev);
1095 
1096 	return _cpsw_init(priv, pdata->enetaddr);
1097 }
1098 
1099 static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
1100 {
1101 	struct cpsw_priv *priv = dev_get_priv(dev);
1102 
1103 	return _cpsw_send(priv, packet, length);
1104 }
1105 
1106 static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1107 {
1108 	struct cpsw_priv *priv = dev_get_priv(dev);
1109 
1110 	return _cpsw_recv(priv, packetp);
1111 }
1112 
1113 static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
1114 				   int length)
1115 {
1116 	struct cpsw_priv *priv = dev_get_priv(dev);
1117 
1118 	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
1119 }
1120 
1121 static void cpsw_eth_stop(struct udevice *dev)
1122 {
1123 	struct cpsw_priv *priv = dev_get_priv(dev);
1124 
1125 	return _cpsw_halt(priv);
1126 }
1127 
1129 static int cpsw_eth_probe(struct udevice *dev)
1130 {
1131 	struct cpsw_priv *priv = dev_get_priv(dev);
1132 
1133 	priv->dev = dev;
1134 
1135 	return _cpsw_register(priv);
1136 }
1137 
1138 static const struct eth_ops cpsw_eth_ops = {
1139 	.start		= cpsw_eth_start,
1140 	.send		= cpsw_eth_send,
1141 	.recv		= cpsw_eth_recv,
1142 	.free_pkt	= cpsw_eth_free_pkt,
1143 	.stop		= cpsw_eth_stop,
1144 };
1145 
1146 static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
1147 {
1148 	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL);
1149 }
1150 
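/*
 * Fixed register offsets (host port, ALE, CPDMA, BD RAM, MDIO divider)
 * are filled in here; cpdma_channels, slaves, ale_entries, bd_ram_size,
 * mac_control, active_slave and the mdio/slave/cpsw-phy-sel subnodes
 * come from the device tree.
 */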
1151 static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
1152 {
1153 	struct eth_pdata *pdata = dev_get_platdata(dev);
1154 	struct cpsw_priv *priv = dev_get_priv(dev);
1155 	const char *phy_mode = NULL;
1156 	const void *fdt = gd->fdt_blob;
1157 	int node = dev->of_offset;
1158 	int subnode;
1159 	int slave_index = 0;
1160 	int active_slave;
1161 	int ret;
1162 
1163 	pdata->iobase = dev_get_addr(dev);
1164 	priv->data.version = CPSW_CTRL_VERSION_2;
1165 	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
1166 	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
1167 	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
1168 	priv->data.mdio_div = CPSW_MDIO_DIV;
1169 	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;
1170 
1171 	pdata->phy_interface = -1;
1172 
1173 	priv->data.cpsw_base = pdata->iobase;
1174 	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
1175 	if (priv->data.channels <= 0) {
1176 		printf("error: cpdma_channels not found in dt\n");
1177 		return -ENOENT;
1178 	}
1179 
1180 	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
1181 	if (priv->data.slaves <= 0) {
1182 		printf("error: slaves not found in dt\n");
1183 		return -ENOENT;
1184 	}
1185 	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
1186 				       priv->data.slaves);
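	/* bail out early if the slave_data allocation failed */
	if (!priv->data.slave_data)
		return -ENOMEM;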
1187 
1188 	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
1189 	if (priv->data.ale_entries <= 0) {
1190 		printf("error: ale_entries not found in dt\n");
1191 		return -ENOENT;
1192 	}
1193 
1194 	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
1195 	if (priv->data.bd_ram_ofs <= 0) {
1196 		printf("error: bd_ram_size not found in dt\n");
1197 		return -ENOENT;
1198 	}
1199 
1200 	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
1201 	if (priv->data.mac_control <= 0) {
1202 		printf("error: mac_control not found in dt\n");
1203 		return -ENOENT;
1204 	}
1205 
1206 	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
1207 	priv->data.active_slave = active_slave;
1208 
1209 	fdt_for_each_subnode(fdt, subnode, node) {
1210 		int len;
1211 		const char *name;
1212 
1213 		name = fdt_get_name(fdt, subnode, &len);
1214 		if (!strncmp(name, "mdio", 4)) {
1215 			u32 mdio_base;
1216 
1217 			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
1218 			if (mdio_base == FDT_ADDR_T_NONE) {
1219 				error("Not able to get MDIO address space\n");
1220 				return -ENOENT;
1221 			}
1222 			priv->data.mdio_base = mdio_base;
1223 		}
1224 
1225 		if (!strncmp(name, "slave", 5)) {
1226 			u32 phy_id[2];
1227 
1228 			if (slave_index >= priv->data.slaves)
1229 				continue;
1230 			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
1231 			if (phy_mode)
1232 				priv->data.slave_data[slave_index].phy_if =
1233 					phy_get_interface_by_name(phy_mode);
1234 
1235 			priv->data.slave_data[slave_index].phy_of_handle =
1236 				fdtdec_lookup_phandle(fdt, subnode,
1237 						      "phy-handle");
1238 
1239 			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
1240 				priv->data.slave_data[slave_index].phy_addr =
1241 						fdtdec_get_int(gd->fdt_blob,
1242 							       priv->data.slave_data[slave_index].phy_of_handle,
1243 							       "reg", -1);
1244 			} else {
1245 				fdtdec_get_int_array(fdt, subnode, "phy_id",
1246 						     phy_id, 2);
1247 				priv->data.slave_data[slave_index].phy_addr =
1248 						phy_id[1];
1249 			}
1250 			slave_index++;
1251 		}
1252 
1253 		if (!strncmp(name, "cpsw-phy-sel", 12)) {
1254 			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
1255 								    subnode);
1256 
1257 			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
1258 				error("Not able to get gmii_sel reg address\n");
1259 				return -ENOENT;
1260 			}
1261 		}
1262 	}
1263 
1264 	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
1265 	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
1266 
1267 	if (priv->data.slaves == 2) {
1268 		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
1269 		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
1270 	}
1271 
1272 	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
1273 	if (ret < 0) {
1274 		error("cpsw read efuse mac failed\n");
1275 		return ret;
1276 	}
1277 
1278 	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
1279 	if (pdata->phy_interface == -1) {
1280 		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1281 		return -EINVAL;
1282 	}
1283 	switch (pdata->phy_interface) {
1284 	case PHY_INTERFACE_MODE_MII:
1285 		writel(MII_MODE_ENABLE, priv->data.gmii_sel);
1286 		break;
1287 	case PHY_INTERFACE_MODE_RMII:
1288 		writel(RMII_MODE_ENABLE, priv->data.gmii_sel);
1289 		break;
1290 	case PHY_INTERFACE_MODE_RGMII:
1291 	case PHY_INTERFACE_MODE_RGMII_ID:
1292 	case PHY_INTERFACE_MODE_RGMII_RXID:
1293 	case PHY_INTERFACE_MODE_RGMII_TXID:
1294 		writel(RGMII_MODE_ENABLE, priv->data.gmii_sel);
1295 		break;
1296 	}
1297 
1298 	return 0;
1299 }
1300 
1302 static const struct udevice_id cpsw_eth_ids[] = {
1303 	{ .compatible = "ti,cpsw" },
1304 	{ .compatible = "ti,am335x-cpsw" },
1305 	{ }
1306 };
1307 
1308 U_BOOT_DRIVER(eth_cpsw) = {
1309 	.name	= "eth_cpsw",
1310 	.id	= UCLASS_ETH,
1311 	.of_match = cpsw_eth_ids,
1312 	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
1313 	.probe	= cpsw_eth_probe,
1314 	.ops	= &cpsw_eth_ops,
1315 	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
1316 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
1317 	.flags = DM_FLAG_ALLOC_PRIV_DMA,
1318 };
1319 #endif /* CONFIG_DM_ETH */
1320