xref: /rk3399_rockchip-uboot/drivers/net/cpsw.c (revision cbe7706ab8aab06c18edaa9b120371f9c8012728)
1 /*
2  * CPSW Ethernet Switch Driver
3  *
4  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <common.h>
17 #include <command.h>
18 #include <net.h>
19 #include <miiphy.h>
20 #include <malloc.h>
21 #include <net.h>
22 #include <netdev.h>
23 #include <cpsw.h>
24 #include <linux/errno.h>
25 #include <asm/gpio.h>
26 #include <asm/io.h>
27 #include <phy.h>
28 #include <asm/arch/cpu.h>
29 #include <dm.h>
30 #include <fdt_support.h>
31 
32 DECLARE_GLOBAL_DATA_PTR;
33 
#define BITMASK(bits)		(BIT(bits) - 1)	/* low 'bits' bits set; only valid for bits < 32 */
#define PHY_REG_MASK		0x1f		/* clause-22: 5-bit register number */
#define PHY_ID_MASK		0x1f		/* clause-22: 5-bit PHY address */
#define NUM_DESCS		(PKTBUFSRX * 2)	/* shared pool for rx + tx descriptors */
#define PKT_MIN			60		/* minimum ethernet frame (no FCS) */
#define PKT_MAX			(1500 + 14 + 4 + 4)	/* MTU + header + FCS + VLAN tag */
#define CLEAR_BIT		1
/* bits of the per-slave MAC_CONTROL register */
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)
44 
/* register block offsets from the CPSW subsystem base */
#define CPSW_HOST_PORT_OFFSET	0x108
#define CPSW_SLAVE0_OFFSET	0x208
#define CPSW_SLAVE1_OFFSET	0x308
#define CPSW_SLAVE_SIZE		0x100
#define CPSW_CPDMA_OFFSET	0x800
#define CPSW_HW_STATS		0x900
#define CPSW_STATERAM_OFFSET	0xa00
#define CPSW_CPTS_OFFSET	0xc00
#define CPSW_ALE_OFFSET		0xd00
#define CPSW_SLIVER0_OFFSET	0xd80
#define CPSW_SLIVER1_OFFSET	0xdc0
#define CPSW_BD_OFFSET		0x2000	/* buffer descriptor RAM */
#define CPSW_MDIO_DIV		0xff	/* default MDIO clock divider */

#define AM335X_GMII_SEL_OFFSET	0x630

/* DMA Registers (offsets within the CPDMA block; *_VER1/*_VER2 differ per IP revision) */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits (hw_mode word of a CPDMA descriptor) */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups.  Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT            100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */
89 
/* MDIO controller register layout */
struct cpsw_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	__reserved_1[20];

	/* per-user access channels; only user[0] is used by this driver */
	struct {
		u32		access;
		u32		physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};
117 
/* CPSW subsystem top-level registers */
struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;	/* per-port statistics enable bits */
	u32	ptype;		/* priority type / elevation control */
};

/* per-slave-port registers (layout varies with SoC family) */
struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
#ifdef CONFIG_AM33XX
	u32	gap_thresh;
#elif defined(CONFIG_TI814X)
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
#endif
	u32	sa_lo;		/* MAC address, low 2 bytes */
	u32	sa_hi;		/* MAC address, high 4 bytes */
};

/* host (CPU-facing) port registers */
struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

/* per-slave MAC ("sliver") registers */
struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};
165 
/* ALE table entries are 68 bits wide, accessed as 3 consecutive words */
#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers (offsets within the ALE block) */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

/* values of the 2-bit entry_type field */
#define ALE_TYPE_FREE			0
#define ALE_TYPE_ADDR			1
#define ALE_TYPE_VLAN			2
#define ALE_TYPE_VLAN_ADDR		3

/* unicast entry ageing classes (ucast_type field) */
#define ALE_UCAST_PERSISTANT		0
#define ALE_UCAST_UNTOUCHED		1
#define ALE_UCAST_OUI			2
#define ALE_UCAST_TOUCHED		3

/* multicast forwarding states (mcast_state field) */
#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

/* per-port forwarding state (PORTCTL low 2 bits) */
enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2
203 
/* driver-side state for one slave (external) port */
struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;	/* cached copy of the sliver mac_control */
	struct cpsw_slave_data		*data;		/* board/platform description */
};

/* CPDMA buffer descriptor, resident in on-chip BD RAM */
struct cpdma_desc {
	/* hardware fields - read and written by the DMA engine */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields - driver bookkeeping only */
	u32			sw_buffer;
	u32			sw_len;
};

/* one CPDMA channel: queued descriptors plus its control register addresses */
struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;	/* head ptr, completion ptr, rx free count regs */
};
227 
/* raw (uncached MMIO) accessors for descriptor fields in BD RAM */
#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

/* accessors for the channel register addresses stored in struct cpdma_chan */
#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

/*
 * Run the following statement on the active slave only.
 * NOTE(review): the 'if (slave)' is always true (pointer arithmetic result);
 * it merely scopes the trailing statement.
 */
#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)
241 
/* per-device driver state */
struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice			*dev;
#else
	struct eth_device		*dev;
#endif
	struct cpsw_platform_data	data;	/* private copy of platform data */
	int				host_port;

	/* carved out of the single CPSW register window */
	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;		/* descriptor pool in BD RAM */
	struct cpdma_desc		*desc_free;	/* head of the free list */
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				phy_mask;	/* bitmap of PHY addresses in use */
};
266 
267 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
268 {
269 	int idx;
270 
271 	idx    = start / 32;
272 	start -= idx * 32;
273 	idx    = 2 - idx; /* flip */
274 	return (ale_entry[idx] >> start) & BITMASK(bits);
275 }
276 
277 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
278 				      u32 value)
279 {
280 	int idx;
281 
282 	value &= BITMASK(bits);
283 	idx    = start / 32;
284 	start -= idx * 32;
285 	idx    = 2 - idx; /* flip */
286 	ale_entry[idx] &= ~(BITMASK(bits) << start);
287 	ale_entry[idx] |=  (value << start);
288 }
289 
/* generate a typed get/set pair for one named ALE entry field */
#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

/* note: unicast and multicast entries overlay the same bits
 * (ucast_type/mcast_state at 62, port_num/port_mask at 66) */
DEFINE_ALE_FIELD(entry_type,		60,	2)
DEFINE_ALE_FIELD(mcast_state,		62,	2)
DEFINE_ALE_FIELD(port_mask,		66,	3)
DEFINE_ALE_FIELD(ucast_type,		62,	2)
DEFINE_ALE_FIELD(port_num,		66,	2)
DEFINE_ALE_FIELD(blocked,		65,	1)
DEFINE_ALE_FIELD(secure,		64,	1)
DEFINE_ALE_FIELD(mcast,			40,	1)
308 
309 /* The MAC address field in the ALE entry cannot be macroized as above */
310 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
311 {
312 	int i;
313 
314 	for (i = 0; i < 6; i++)
315 		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
316 }
317 
318 static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
319 {
320 	int i;
321 
322 	for (i = 0; i < 6; i++)
323 		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
324 }
325 
/* Read ALE table entry 'idx' into ale_entry[]; returns idx unchanged. */
static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	/* writing the index (without ALE_TABLE_WRITE) latches that entry
	 * into the ALE_TABLE word registers */
	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}
337 
/* Write ale_entry[] into ALE table slot 'idx'; returns idx unchanged. */
static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	/* load the entry words first, then commit with the WRITE strobe */
	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}
349 
/*
 * Linearly scan the ALE table for an address entry matching 'addr'.
 * Returns the entry index, or -ENOENT if the address is not present.
 */
static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		/* only address-bearing entry types can match */
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}
368 
369 static int cpsw_ale_match_free(struct cpsw_priv *priv)
370 {
371 	u32 ale_entry[ALE_ENTRY_WORDS];
372 	int type, idx;
373 
374 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
375 		cpsw_ale_read(priv, idx, ale_entry);
376 		type = cpsw_ale_get_entry_type(ale_entry);
377 		if (type == ALE_TYPE_FREE)
378 			return idx;
379 	}
380 	return -ENOENT;
381 }
382 
/*
 * Find a victim entry that may be evicted to make room: any unicast
 * address entry that is not persistent and not an OUI entry.
 * Returns its index, or -ENOENT when nothing is evictable.
 */
static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		/* multicast entries are never aged out here */
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}
402 
/*
 * Install (or replace) a persistent unicast entry for 'addr' on 'port'.
 * 'flags' is a mask of ALE_SECURE / ALE_BLOCKED.
 * Returns 0, or -ENOMEM when no table slot can be found or freed.
 */
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	/* reuse the existing slot, else a free one, else age one out */
	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}
427 
/*
 * Install (or extend) a multicast entry for 'addr'.  If the address is
 * already in the table, 'port_mask' is OR-ed into the existing mask.
 * Returns 0, or -ENOMEM when no table slot can be found or freed.
 */
static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	/* merge with any ports already forwarding this group */
	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}
456 
/* Read-modify-write one bit of the ALE control register. */
static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

/* named ALE control bits */
#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv,  2, val)
470 
/*
 * Set the 2-bit forwarding state (enum cpsw_ale_port_state) of 'port'
 * in its PORTCTL register, preserving the other bits.
 */
static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp  = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}
482 
/* MDIO register base, shared by the MDIO helpers below (single controller) */
static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	/* poll GO clear, backing off 10us per attempt */
	while (timeout-- &&
	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		/*
		 * NOTE(review): -ETIMEDOUT is funneled through a u32 return;
		 * callers only mask bits out of it, so the error value is
		 * indistinguishable from register contents -- confirm before
		 * relying on it.
		 */
		return -ETIMEDOUT;
	}
	return reg;
}
501 
/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	/* poll CONTROL_IDLE, backing off 10us per attempt */
	while (timeout-- &&
		((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}
514 
/*
 * Read register 'phy_reg' of PHY 'phy_id' over MDIO (clause 22 only;
 * 'dev_addr' is ignored).  Returns the 16-bit value, -1 when the PHY
 * did not ack, or -EINVAL for out-of-range arguments.
 */
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
				int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	/* make sure no transaction is in flight before starting ours */
	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}
533 
/*
 * Write 'data' to register 'phy_reg' of PHY 'phy_id' over MDIO
 * (clause 22 only; 'dev_addr' is ignored).
 * Returns 0, or -EINVAL for out-of-range arguments.
 */
static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
				int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	/* wait for a free slot, start the write, then wait for completion */
	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
		   (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}
550 
551 static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
552 {
553 	struct mii_dev *bus = mdio_alloc();
554 
555 	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
556 
557 	/* set enable and clock divider */
558 	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
559 
560 	/*
561 	 * wait for scan logic to settle:
562 	 * the scan time consists of (a) a large fixed component, and (b) a
563 	 * small component that varies with the mii bus frequency.  These
564 	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
565 	 * silicon.  Since the effect of (b) was found to be largely
566 	 * negligible, we keep things simple here.
567 	 */
568 	udelay(1000);
569 
570 	bus->read = cpsw_mdio_read;
571 	bus->write = cpsw_mdio_write;
572 	strcpy(bus->name, name);
573 
574 	mdio_register(bus);
575 }
576 
/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	/* NOTE(review): no timeout -- spins forever if the reset never
	 * completes */
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}
584 
/* pack a 6-byte MAC address into the SA_HI / SA_LO register format */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
588 
/*
 * Program the device's MAC address into the slave port's SA_HI/SA_LO
 * registers; the source differs between the DM and legacy eth models.
 */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}
602 
/*
 * Bring up the PHY, derive a mac_control value from its negotiated
 * speed/duplex, and report link state through *link.  The sliver is
 * only reprogrammed when the value differs from the cached copy.
 */
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;

	phy = priv->phydev;

	if (!phy)
		return;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	/* no change -- avoid needless register write and log noise */
	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
				slave->slave_num, phy->speed,
				(phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}
641 
642 static int cpsw_update_link(struct cpsw_priv *priv)
643 {
644 	int link = 0;
645 	struct cpsw_slave *slave;
646 
647 	for_active_slave(slave, priv)
648 		cpsw_slave_update_link(slave, priv, &link);
649 
650 	return link;
651 }
652 
653 static inline u32  cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
654 {
655 	if (priv->host_port == 0)
656 		return slave_num + 1;
657 	else
658 		return slave_num;
659 }
660 
/*
 * Reset and configure one slave port: priority maps, max rx length,
 * MAC address, ALE forwarding state and broadcast entry.  Also records
 * the slave's PHY address in priv->phy_mask.
 */
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32     slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	/* flood broadcasts out of this slave port */
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}
685 
686 static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
687 {
688 	struct cpdma_desc *desc = priv->desc_free;
689 
690 	if (desc)
691 		priv->desc_free = desc_read_ptr(desc, hw_next);
692 	return desc;
693 }
694 
695 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
696 {
697 	if (desc) {
698 		desc_write(desc, hw_next, priv->desc_free);
699 		priv->desc_free = desc;
700 	}
701 }
702 
/*
 * Queue 'buffer' on a DMA channel (tx frame, or empty rx buffer when the
 * channel has a rxfree register).  Short frames are padded to PKT_MIN.
 * Returns 0, or -ENOMEM when no descriptor is free.
 */
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	/* single-buffer packet, owned by hardware until completion */
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already: if the engine
	 * already stopped at 'prev' it must be re-kicked via HDP */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	/* tell the engine one more rx buffer is available */
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}
747 
/*
 * Reap the descriptor at the head of 'chan', if completed.
 * On success returns 0 and fills *buffer / *len (when non-NULL).
 * Returns -ENOENT when the queue is empty, -EBUSY when the head
 * descriptor is still owned by hardware.
 */
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;	/* low 11 bits hold the packet length */

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		/* still owned by hardware; restart a queue that stalled
		 * (HDP idle while a hw-owned descriptor is pending) */
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	/* completed: advance the queue, ack completion, recycle the desc */
	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}
780 
781 static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
782 {
783 	struct cpsw_slave	*slave;
784 	int i, ret;
785 
786 	/* soft reset the controller and initialize priv */
787 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
788 
789 	/* initialize and reset the address lookup engine */
790 	cpsw_ale_enable(priv, 1);
791 	cpsw_ale_clear(priv, 1);
792 	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
793 
794 	/* setup host port priority mapping */
795 	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
796 	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
797 
798 	/* disable priority elevation and enable statistics on all ports */
799 	__raw_writel(0, &priv->regs->ptype);
800 
801 	/* enable statistics collection only on the host port */
802 	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
803 	__raw_writel(0x7, &priv->regs->stat_port_en);
804 
805 	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
806 
807 	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
808 	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
809 
810 	for_active_slave(slave, priv)
811 		cpsw_slave_init(slave, priv);
812 
813 	cpsw_update_link(priv);
814 
815 	/* init descriptor pool */
816 	for (i = 0; i < NUM_DESCS; i++) {
817 		desc_write(&priv->descs[i], hw_next,
818 			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
819 	}
820 	priv->desc_free = &priv->descs[0];
821 
822 	/* initialize channels */
823 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
824 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
825 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER2;
826 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER2;
827 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
828 
829 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
830 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER2;
831 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER2;
832 	} else {
833 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
834 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER1;
835 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER1;
836 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
837 
838 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
839 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER1;
840 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER1;
841 	}
842 
843 	/* clear dma state */
844 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
845 
846 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
847 		for (i = 0; i < priv->data.channels; i++) {
848 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
849 					* i);
850 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
851 					* i);
852 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
853 					* i);
854 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
855 					* i);
856 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
857 					* i);
858 		}
859 	} else {
860 		for (i = 0; i < priv->data.channels; i++) {
861 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
862 					* i);
863 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
864 					* i);
865 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
866 					* i);
867 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
868 					* i);
869 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
870 					* i);
871 
872 		}
873 	}
874 
875 	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
876 	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
877 
878 	/* submit rx descs */
879 	for (i = 0; i < PKTBUFSRX; i++) {
880 		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
881 				   PKTSIZE);
882 		if (ret < 0) {
883 			printf("error %d submitting rx desc\n", ret);
884 			break;
885 		}
886 	}
887 
888 	return 0;
889 }
890 
/* Stop DMA and soft-reset both the switch and the CPDMA engine. */
static void _cpsw_halt(struct cpsw_priv *priv)
{
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

}
903 
/*
 * Transmit one frame: flush its cache lines for DMA, reap any completed
 * tx descriptors, then queue the packet.  Returns 0 on success, -ENOMEM
 * when no descriptor is free, or -ETIMEDOUT when reaping stalls.
 */
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	/* make the frame visible to the (non-coherent) DMA engine */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* first reap completed packets */
	while (timeout-- &&
		(cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
925 
926 static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
927 {
928 	void *buffer;
929 	int len;
930 	int ret = -EAGAIN;
931 
932 	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
933 	if (ret < 0)
934 		return ret;
935 
936 	invalidate_dcache_range((unsigned long)buffer,
937 				(unsigned long)buffer + PKTSIZE_ALIGN);
938 	*pkt = buffer;
939 
940 	return len;
941 }
942 
943 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
944 			    struct cpsw_priv *priv)
945 {
946 	void			*regs = priv->regs;
947 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
948 	slave->slave_num = slave_num;
949 	slave->data	= data;
950 	slave->regs	= regs + data->slave_reg_ofs;
951 	slave->sliver	= regs + data->sliver_reg_ofs;
952 }
953 
/*
 * Connect and configure the slave's PHY, limiting it to gigabit
 * features.  Returns 1 on success, -1 if the PHY cannot be connected
 * (note: nonstandard return convention; the caller ignores it).
 */
static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			slave->data->phy_addr,
			priv->dev,
			slave->data->phy_if);

	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

#ifdef CONFIG_DM_ETH
	/* attach the device-tree PHY node so the PHY driver can read it */
	if (slave->data->phy_of_handle)
		phydev->dev->of_offset = slave->data->phy_of_handle;
#endif

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}
980 
981 int _cpsw_register(struct cpsw_priv *priv)
982 {
983 	struct cpsw_slave	*slave;
984 	struct cpsw_platform_data *data = &priv->data;
985 	void			*regs = (void *)data->cpsw_base;
986 
987 	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
988 	if (!priv->slaves) {
989 		return -ENOMEM;
990 	}
991 
992 	priv->host_port		= data->host_port_num;
993 	priv->regs		= regs;
994 	priv->host_port_regs	= regs + data->host_port_reg_ofs;
995 	priv->dma_regs		= regs + data->cpdma_reg_ofs;
996 	priv->ale_regs		= regs + data->ale_reg_ofs;
997 	priv->descs		= (void *)regs + data->bd_ram_ofs;
998 
999 	int idx = 0;
1000 
1001 	for_each_slave(slave, priv) {
1002 		cpsw_slave_setup(slave, idx, priv);
1003 		idx = idx + 1;
1004 	}
1005 
1006 	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
1007 	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
1008 	for_active_slave(slave, priv)
1009 		cpsw_phy_init(priv, slave);
1010 
1011 	return 0;
1012 }
1013 
1014 #ifndef CONFIG_DM_ETH
/* legacy eth_device init hook: delegate to the common init path */
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv	*priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}
1021 
1022 static void cpsw_halt(struct eth_device *dev)
1023 {
1024 	struct cpsw_priv *priv = dev->priv;
1025 
1026 	return _cpsw_halt(priv);
1027 }
1028 
/* legacy eth_device send hook: delegate to the common tx path */
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv	*priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}
1035 
/*
 * legacy eth_device recv hook: hand any received frame to the network
 * stack, then recycle its buffer back to the rx channel.
 */
static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);

	if (len > 0) {
		net_process_received_packet(pkt, len);
		/* requeue the buffer for further reception */
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}
1051 
1052 int cpsw_register(struct cpsw_platform_data *data)
1053 {
1054 	struct cpsw_priv	*priv;
1055 	struct eth_device	*dev;
1056 	int ret;
1057 
1058 	dev = calloc(sizeof(*dev), 1);
1059 	if (!dev)
1060 		return -ENOMEM;
1061 
1062 	priv = calloc(sizeof(*priv), 1);
1063 	if (!priv) {
1064 		free(dev);
1065 		return -ENOMEM;
1066 	}
1067 
1068 	priv->dev = dev;
1069 	priv->data = *data;
1070 
1071 	strcpy(dev->name, "cpsw");
1072 	dev->iobase	= 0;
1073 	dev->init	= cpsw_init;
1074 	dev->halt	= cpsw_halt;
1075 	dev->send	= cpsw_send;
1076 	dev->recv	= cpsw_recv;
1077 	dev->priv	= priv;
1078 
1079 	eth_register(dev);
1080 
1081 	ret = _cpsw_register(priv);
1082 	if (ret < 0) {
1083 		eth_unregister(dev);
1084 		free(dev);
1085 		free(priv);
1086 		return ret;
1087 	}
1088 
1089 	return 1;
1090 }
1091 #else
/* DM eth 'start' op: initialise the switch with the platform MAC address */
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}
1099 
/* DM eth 'send' op: delegate to the common tx path */
static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}
1106 
/* DM eth 'recv' op: returns packet length, or a negative error ('flags'
 * is unused; buffer recycling happens in cpsw_eth_free_pkt) */
static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}
1113 
/* DM eth 'free_pkt' op: requeue a consumed rx buffer on the rx channel
 * ('length' is unused; buffers are always resubmitted at PKTSIZE) */
static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
				   int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}
1121 
/* DM eth 'stop' op: delegate to the common halt path */
static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	/* plain call: 'return <void expression>;' was legal but unidiomatic */
	_cpsw_halt(priv);
}
1128 
1129 
/* DM probe: bind the udevice into priv and run the common registration */
static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	priv->dev = dev;

	return _cpsw_register(priv);
}
1138 
/* driver-model ethernet operations */
static const struct eth_ops cpsw_eth_ops = {
	.start		= cpsw_eth_start,
	.send		= cpsw_eth_send,
	.recv		= cpsw_eth_recv,
	.free_pkt	= cpsw_eth_free_pkt,
	.stop		= cpsw_eth_stop,
};
1146 
1147 static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
1148 {
1149 	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
1150 						  false);
1151 }
1152 
1153 static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
1154 {
1155 	struct eth_pdata *pdata = dev_get_platdata(dev);
1156 	struct cpsw_priv *priv = dev_get_priv(dev);
1157 	struct gpio_desc *mode_gpios;
1158 	const char *phy_mode;
1159 	const void *fdt = gd->fdt_blob;
1160 	int node = dev->of_offset;
1161 	int subnode;
1162 	int slave_index = 0;
1163 	int active_slave;
1164 	int num_mode_gpios;
1165 	int ret;
1166 
1167 	pdata->iobase = dev_get_addr(dev);
1168 	priv->data.version = CPSW_CTRL_VERSION_2;
1169 	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
1170 	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
1171 	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
1172 	priv->data.mdio_div = CPSW_MDIO_DIV;
1173 	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET,
1174 
1175 	pdata->phy_interface = -1;
1176 
1177 	priv->data.cpsw_base = pdata->iobase;
1178 	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
1179 	if (priv->data.channels <= 0) {
1180 		printf("error: cpdma_channels not found in dt\n");
1181 		return -ENOENT;
1182 	}
1183 
1184 	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
1185 	if (priv->data.slaves <= 0) {
1186 		printf("error: slaves not found in dt\n");
1187 		return -ENOENT;
1188 	}
1189 	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
1190 				       priv->data.slaves);
1191 
1192 	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
1193 	if (priv->data.ale_entries <= 0) {
1194 		printf("error: ale_entries not found in dt\n");
1195 		return -ENOENT;
1196 	}
1197 
1198 	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
1199 	if (priv->data.bd_ram_ofs <= 0) {
1200 		printf("error: bd_ram_size not found in dt\n");
1201 		return -ENOENT;
1202 	}
1203 
1204 	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
1205 	if (priv->data.mac_control <= 0) {
1206 		printf("error: ale_entries not found in dt\n");
1207 		return -ENOENT;
1208 	}
1209 
1210 	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
1211 	if (num_mode_gpios > 0) {
1212 		mode_gpios = malloc(sizeof(struct gpio_desc) *
1213 				    num_mode_gpios);
1214 		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
1215 					  num_mode_gpios, GPIOD_IS_OUT);
1216 		free(mode_gpios);
1217 	}
1218 
1219 	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
1220 	priv->data.active_slave = active_slave;
1221 
1222 	fdt_for_each_subnode(fdt, subnode, node) {
1223 		int len;
1224 		const char *name;
1225 
1226 		name = fdt_get_name(fdt, subnode, &len);
1227 		if (!strncmp(name, "mdio", 4)) {
1228 			u32 mdio_base;
1229 
1230 			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
1231 			if (mdio_base == FDT_ADDR_T_NONE) {
1232 				error("Not able to get MDIO address space\n");
1233 				return -ENOENT;
1234 			}
1235 			priv->data.mdio_base = mdio_base;
1236 		}
1237 
1238 		if (!strncmp(name, "slave", 5)) {
1239 			u32 phy_id[2];
1240 
1241 			if (slave_index >= priv->data.slaves)
1242 				continue;
1243 			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
1244 			if (phy_mode)
1245 				priv->data.slave_data[slave_index].phy_if =
1246 					phy_get_interface_by_name(phy_mode);
1247 
1248 			priv->data.slave_data[slave_index].phy_of_handle =
1249 				fdtdec_lookup_phandle(fdt, subnode,
1250 						      "phy-handle");
1251 
1252 			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
1253 				priv->data.slave_data[slave_index].phy_addr =
1254 						fdtdec_get_int(gd->fdt_blob,
1255 							       priv->data.slave_data[slave_index].phy_of_handle,
1256 							       "reg", -1);
1257 			} else {
1258 				fdtdec_get_int_array(fdt, subnode, "phy_id",
1259 						     phy_id, 2);
1260 				priv->data.slave_data[slave_index].phy_addr =
1261 						phy_id[1];
1262 			}
1263 			slave_index++;
1264 		}
1265 
1266 		if (!strncmp(name, "cpsw-phy-sel", 12)) {
1267 			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
1268 								    subnode);
1269 
1270 			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
1271 				error("Not able to get gmii_sel reg address\n");
1272 				return -ENOENT;
1273 			}
1274 		}
1275 	}
1276 
1277 	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
1278 	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
1279 
1280 	if (priv->data.slaves == 2) {
1281 		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
1282 		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
1283 	}
1284 
1285 	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
1286 	if (ret < 0) {
1287 		error("cpsw read efuse mac failed\n");
1288 		return ret;
1289 	}
1290 
1291 	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
1292 	if (pdata->phy_interface == -1) {
1293 		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1294 		return -EINVAL;
1295 	}
1296 	switch (pdata->phy_interface) {
1297 	case PHY_INTERFACE_MODE_MII:
1298 		writel(MII_MODE_ENABLE, priv->data.gmii_sel);
1299 		break;
1300 	case PHY_INTERFACE_MODE_RMII:
1301 		writel(RMII_MODE_ENABLE, priv->data.gmii_sel);
1302 		break;
1303 	case PHY_INTERFACE_MODE_RGMII:
1304 	case PHY_INTERFACE_MODE_RGMII_ID:
1305 	case PHY_INTERFACE_MODE_RGMII_RXID:
1306 	case PHY_INTERFACE_MODE_RGMII_TXID:
1307 		writel(RGMII_MODE_ENABLE, priv->data.gmii_sel);
1308 		break;
1309 	}
1310 
1311 	return 0;
1312 }
1313 
1314 
/* Devicetree compatible strings matched by this driver */
static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
	{ }	/* sentinel */
};
1320 
/*
 * Driver-model descriptor for the CPSW ethernet switch.
 * DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable (aligned) private data,
 * since cpsw_priv embeds the buffer-descriptor channels.
 */
U_BOOT_DRIVER(eth_cpsw) = {
	.name	= "eth_cpsw",
	.id	= UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe	= cpsw_eth_probe,
	.ops	= &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
1332 #endif /* CONFIG_DM_ETH */
1333