xref: /rk3399_rockchip-uboot/drivers/net/cpsw.c (revision 72df68cc6b73febcaacbb9e2e232b6580672c8bd)
1 /*
2  * CPSW Ethernet Switch Driver
3  *
4  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <common.h>
17 #include <command.h>
18 #include <net.h>
19 #include <miiphy.h>
20 #include <malloc.h>
22 #include <netdev.h>
23 #include <cpsw.h>
24 #include <asm/errno.h>
25 #include <asm/io.h>
26 #include <phy.h>
27 #include <asm/arch/cpu.h>
28 
29 #define BITMASK(bits)		(BIT(bits) - 1)
30 #define PHY_REG_MASK		0x1f
31 #define PHY_ID_MASK		0x1f
32 #define NUM_DESCS		(PKTBUFSRX * 2)
33 #define PKT_MIN			60
34 #define PKT_MAX			(1500 + 14 + 4 + 4)
35 #define CLEAR_BIT		1
36 #define GIGABITEN		BIT(7)
37 #define FULLDUPLEXEN		BIT(0)
38 #define MIIEN			BIT(15)
39 
40 /* DMA Registers */
41 #define CPDMA_TXCONTROL		0x004
42 #define CPDMA_RXCONTROL		0x014
43 #define CPDMA_SOFTRESET		0x01c
44 #define CPDMA_RXFREE		0x0e0
45 #define CPDMA_TXHDP_VER1	0x100
46 #define CPDMA_TXHDP_VER2	0x200
47 #define CPDMA_RXHDP_VER1	0x120
48 #define CPDMA_RXHDP_VER2	0x220
49 #define CPDMA_TXCP_VER1		0x140
50 #define CPDMA_TXCP_VER2		0x240
51 #define CPDMA_RXCP_VER1		0x160
52 #define CPDMA_RXCP_VER2		0x260
53 
54 /* Descriptor mode bits */
55 #define CPDMA_DESC_SOP		BIT(31)
56 #define CPDMA_DESC_EOP		BIT(30)
57 #define CPDMA_DESC_OWNER	BIT(29)
58 #define CPDMA_DESC_EOQ		BIT(28)
59 
60 /*
61  * This timeout is a worst-case, ultra-defensive measure against
62  * unexpected controller lockups.  In practice we should never hit
63  * this scenario.
64  */
65 #define MDIO_TIMEOUT            100 /* msecs */
66 #define CPDMA_TIMEOUT		100 /* msecs */
67 
68 struct cpsw_mdio_regs {
69 	u32	version;
70 	u32	control;
71 #define CONTROL_IDLE		BIT(31)
72 #define CONTROL_ENABLE		BIT(30)
73 
74 	u32	alive;
75 	u32	link;
76 	u32	linkintraw;
77 	u32	linkintmasked;
78 	u32	__reserved_0[2];
79 	u32	userintraw;
80 	u32	userintmasked;
81 	u32	userintmaskset;
82 	u32	userintmaskclr;
83 	u32	__reserved_1[20];
84 
85 	struct {
86 		u32		access;
87 		u32		physel;
88 #define USERACCESS_GO		BIT(31)
89 #define USERACCESS_WRITE	BIT(30)
90 #define USERACCESS_ACK		BIT(29)
91 #define USERACCESS_READ		(0)
92 #define USERACCESS_DATA		(0xffff)
93 	} user[0];
94 };
95 
96 struct cpsw_regs {
97 	u32	id_ver;
98 	u32	control;
99 	u32	soft_reset;
100 	u32	stat_port_en;
101 	u32	ptype;
102 };
103 
104 struct cpsw_slave_regs {
105 	u32	max_blks;
106 	u32	blk_cnt;
107 	u32	flow_thresh;
108 	u32	port_vlan;
109 	u32	tx_pri_map;
110 #ifdef CONFIG_AM33XX
111 	u32	gap_thresh;
112 #elif defined(CONFIG_TI814X)
113 	u32	ts_ctl;
114 	u32	ts_seq_ltype;
115 	u32	ts_vlan;
116 #endif
117 	u32	sa_lo;
118 	u32	sa_hi;
119 };
120 
121 struct cpsw_host_regs {
122 	u32	max_blks;
123 	u32	blk_cnt;
124 	u32	flow_thresh;
125 	u32	port_vlan;
126 	u32	tx_pri_map;
127 	u32	cpdma_tx_pri_map;
128 	u32	cpdma_rx_chan_map;
129 };
130 
131 struct cpsw_sliver_regs {
132 	u32	id_ver;
133 	u32	mac_control;
134 	u32	mac_status;
135 	u32	soft_reset;
136 	u32	rx_maxlen;
137 	u32	__reserved_0;
138 	u32	rx_pause;
139 	u32	tx_pause;
140 	u32	__reserved_1;
141 	u32	rx_pri_map;
142 };
143 
144 #define ALE_ENTRY_BITS		68
145 #define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
146 
147 /* ALE Registers */
148 #define ALE_CONTROL		0x08
149 #define ALE_UNKNOWNVLAN		0x18
150 #define ALE_TABLE_CONTROL	0x20
151 #define ALE_TABLE		0x34
152 #define ALE_PORTCTL		0x40
153 
154 #define ALE_TABLE_WRITE		BIT(31)
155 
156 #define ALE_TYPE_FREE			0
157 #define ALE_TYPE_ADDR			1
158 #define ALE_TYPE_VLAN			2
159 #define ALE_TYPE_VLAN_ADDR		3
160 
161 #define ALE_UCAST_PERSISTANT		0
162 #define ALE_UCAST_UNTOUCHED		1
163 #define ALE_UCAST_OUI			2
164 #define ALE_UCAST_TOUCHED		3
165 
166 #define ALE_MCAST_FWD			0
167 #define ALE_MCAST_BLOCK_LEARN_FWD	1
168 #define ALE_MCAST_FWD_LEARN		2
169 #define ALE_MCAST_FWD_2			3
170 
171 enum cpsw_ale_port_state {
172 	ALE_PORT_STATE_DISABLE	= 0x00,
173 	ALE_PORT_STATE_BLOCK	= 0x01,
174 	ALE_PORT_STATE_LEARN	= 0x02,
175 	ALE_PORT_STATE_FORWARD	= 0x03,
176 };
177 
178 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
179 #define ALE_SECURE	1
180 #define ALE_BLOCKED	2
181 
182 struct cpsw_slave {
183 	struct cpsw_slave_regs		*regs;
184 	struct cpsw_sliver_regs		*sliver;
185 	int				slave_num;
186 	u32				mac_control;
187 	struct cpsw_slave_data		*data;
188 };
189 
190 struct cpdma_desc {
191 	/* hardware fields */
192 	u32			hw_next;
193 	u32			hw_buffer;
194 	u32			hw_len;
195 	u32			hw_mode;
196 	/* software fields */
197 	u32			sw_buffer;
198 	u32			sw_len;
199 };
200 
201 struct cpdma_chan {
202 	struct cpdma_desc	*head, *tail;
203 	void			*hdp, *cp, *rxfree;
204 };
205 
206 #define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
207 #define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
208 #define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))
209 
210 #define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
211 #define chan_read(chan, fld)		__raw_readl((chan)->fld)
212 #define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))
213 
214 #define for_active_slave(slave, priv) \
215 	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
216 #define for_each_slave(slave, priv) \
217 	for (slave = (priv)->slaves; slave != (priv)->slaves + \
218 				(priv)->data.slaves; slave++)
219 
220 struct cpsw_priv {
221 	struct eth_device		*dev;
222 	struct cpsw_platform_data	data;
223 	int				host_port;
224 
225 	struct cpsw_regs		*regs;
226 	void				*dma_regs;
227 	struct cpsw_host_regs		*host_port_regs;
228 	void				*ale_regs;
229 
230 	struct cpdma_desc		*descs;
231 	struct cpdma_desc		*desc_free;
232 	struct cpdma_chan		rx_chan, tx_chan;
233 
234 	struct cpsw_slave		*slaves;
235 	struct phy_device		*phydev;
236 	struct mii_dev			*bus;
237 
238 	u32				phy_mask;
239 };
240 
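/*
 * ALE table entries are 68 bits wide and are transferred as three 32-bit
 * words (see cpsw_ale_read()/cpsw_ale_write() below).  ale_entry[0] holds
 * the most significant word, hence the "2 - idx" flip when mapping a bit
 * offset to a word index.
 */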
241 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
242 {
243 	int idx;
244 
245 	idx    = start / 32;
246 	start -= idx * 32;
247 	idx    = 2 - idx; /* flip */
248 	return (ale_entry[idx] >> start) & BITMASK(bits);
249 }
250 
251 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
252 				      u32 value)
253 {
254 	int idx;
255 
256 	value &= BITMASK(bits);
257 	idx    = start / 32;
258 	start -= idx * 32;
259 	idx    = 2 - idx; /* flip */
260 	ale_entry[idx] &= ~(BITMASK(bits) << start);
261 	ale_entry[idx] |=  (value << start);
262 }
263 
264 #define DEFINE_ALE_FIELD(name, start, bits)				\
265 static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
266 {									\
267 	return cpsw_ale_get_field(ale_entry, start, bits);		\
268 }									\
269 static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
270 {									\
271 	cpsw_ale_set_field(ale_entry, start, bits, value);		\
272 }
273 
274 DEFINE_ALE_FIELD(entry_type,		60,	2)
275 DEFINE_ALE_FIELD(mcast_state,		62,	2)
276 DEFINE_ALE_FIELD(port_mask,		66,	3)
277 DEFINE_ALE_FIELD(ucast_type,		62,	2)
278 DEFINE_ALE_FIELD(port_num,		66,	2)
279 DEFINE_ALE_FIELD(blocked,		65,	1)
280 DEFINE_ALE_FIELD(secure,		64,	1)
281 DEFINE_ALE_FIELD(mcast,			40,	1)
282 
283 /* The MAC address field in the ALE entry cannot be macroized as above */
284 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
285 {
286 	int i;
287 
288 	for (i = 0; i < 6; i++)
289 		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
290 }
291 
292 static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
293 {
294 	int i;
295 
296 	for (i = 0; i < 6; i++)
297 		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
298 }
299 
300 static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
301 {
302 	int i;
303 
304 	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
305 
306 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
307 		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
308 
309 	return idx;
310 }
311 
312 static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
313 {
314 	int i;
315 
316 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
317 		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
318 
319 	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
320 
321 	return idx;
322 }
323 
324 static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8 *addr)
325 {
326 	u32 ale_entry[ALE_ENTRY_WORDS];
327 	int type, idx;
328 
329 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
330 		u8 entry_addr[6];
331 
332 		cpsw_ale_read(priv, idx, ale_entry);
333 		type = cpsw_ale_get_entry_type(ale_entry);
334 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
335 			continue;
336 		cpsw_ale_get_addr(ale_entry, entry_addr);
337 		if (memcmp(entry_addr, addr, 6) == 0)
338 			return idx;
339 	}
340 	return -ENOENT;
341 }
342 
343 static int cpsw_ale_match_free(struct cpsw_priv *priv)
344 {
345 	u32 ale_entry[ALE_ENTRY_WORDS];
346 	int type, idx;
347 
348 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
349 		cpsw_ale_read(priv, idx, ale_entry);
350 		type = cpsw_ale_get_entry_type(ale_entry);
351 		if (type == ALE_TYPE_FREE)
352 			return idx;
353 	}
354 	return -ENOENT;
355 }
356 
357 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
358 {
359 	u32 ale_entry[ALE_ENTRY_WORDS];
360 	int type, idx;
361 
362 	for (idx = 0; idx < priv->data.ale_entries; idx++) {
363 		cpsw_ale_read(priv, idx, ale_entry);
364 		type = cpsw_ale_get_entry_type(ale_entry);
365 		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
366 			continue;
367 		if (cpsw_ale_get_mcast(ale_entry))
368 			continue;
369 		type = cpsw_ale_get_ucast_type(ale_entry);
370 		if (type != ALE_UCAST_PERSISTANT &&
371 		    type != ALE_UCAST_OUI)
372 			return idx;
373 	}
374 	return -ENOENT;
375 }
376 
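/*
 * Install (or refresh) a persistent unicast entry for addr on the given
 * port.  An existing entry for the address is reused when present;
 * otherwise a free slot, or failing that an ageable one, is claimed.
 */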
377 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
378 			      int port, int flags)
379 {
380 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
381 	int idx;
382 
383 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
384 	cpsw_ale_set_addr(ale_entry, addr);
385 	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
386 	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
387 	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
388 	cpsw_ale_set_port_num(ale_entry, port);
389 
390 	idx = cpsw_ale_match_addr(priv, addr);
391 	if (idx < 0)
392 		idx = cpsw_ale_match_free(priv);
393 	if (idx < 0)
394 		idx = cpsw_ale_find_ageable(priv);
395 	if (idx < 0)
396 		return -ENOMEM;
397 
398 	cpsw_ale_write(priv, idx, ale_entry);
399 	return 0;
400 }
401 
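/*
 * Install a multicast entry for addr, OR-ing port_mask into whatever
 * port mask an existing entry for the address already carries.
 */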
402 static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
403 {
404 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
405 	int idx, mask;
406 
407 	idx = cpsw_ale_match_addr(priv, addr);
408 	if (idx >= 0)
409 		cpsw_ale_read(priv, idx, ale_entry);
410 
411 	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
412 	cpsw_ale_set_addr(ale_entry, addr);
413 	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
414 
415 	mask = cpsw_ale_get_port_mask(ale_entry);
416 	port_mask |= mask;
417 	cpsw_ale_set_port_mask(ale_entry, port_mask);
418 
419 	if (idx < 0)
420 		idx = cpsw_ale_match_free(priv);
421 	if (idx < 0)
422 		idx = cpsw_ale_find_ageable(priv);
423 	if (idx < 0)
424 		return -ENOMEM;
425 
426 	cpsw_ale_write(priv, idx, ale_entry);
427 	return 0;
428 }
429 
430 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
431 {
432 	u32 tmp, mask = BIT(bit);
433 
434 	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
435 	tmp &= ~mask;
436 	tmp |= val ? mask : 0;
437 	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
438 }
439 
440 #define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
441 #define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
442 #define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv,  2, val)
443 
444 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
445 				       int val)
446 {
447 	int offset = ALE_PORTCTL + 4 * port;
448 	u32 tmp, mask = 0x3;
449 
450 	tmp  = __raw_readl(priv->ale_regs + offset);
451 	tmp &= ~mask;
452 	tmp |= val & mask;
453 	__raw_writel(tmp, priv->ale_regs + offset);
454 }
455 
456 static struct cpsw_mdio_regs *mdio_regs;
457 
458 /* wait until hardware is ready for another user access */
459 static inline u32 wait_for_user_access(void)
460 {
461 	u32 reg = 0;
462 	int timeout = MDIO_TIMEOUT;
463 
464 	while (timeout-- &&
465 	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
466 		udelay(10);
467 
468 	if (timeout == -1) {
469 		printf("wait_for_user_access Timeout\n");
470 		return -ETIMEDOUT;
471 	}
472 	return reg;
473 }
474 
475 /* wait until hardware state machine is idle */
476 static inline void wait_for_idle(void)
477 {
478 	int timeout = MDIO_TIMEOUT;
479 
480 	while (timeout-- &&
481 		((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
482 		udelay(10);
483 
484 	if (timeout == -1)
485 		printf("wait_for_idle Timeout\n");
486 }
487 
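/*
 * MDIO transactions go through the USERACCESS register: GO in bit 31,
 * WRITE in bit 30, ACK in bit 29, the PHY register address in bits
 * 25:21, the PHY address in bits 20:16 and the data in bits 15:0.
 */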
488 static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
489 				int dev_addr, int phy_reg)
490 {
491 	int data;
492 	u32 reg;
493 
494 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
495 		return -EINVAL;
496 
497 	wait_for_user_access();
498 	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
499 	       (phy_id << 16));
500 	__raw_writel(reg, &mdio_regs->user[0].access);
501 	reg = wait_for_user_access();
502 
503 	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
504 	return data;
505 }
506 
507 static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
508 				int phy_reg, u16 data)
509 {
510 	u32 reg;
511 
512 	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
513 		return -EINVAL;
514 
515 	wait_for_user_access();
516 	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
517 		   (phy_id << 16) | (data & USERACCESS_DATA));
518 	__raw_writel(reg, &mdio_regs->user[0].access);
519 	wait_for_user_access();
520 
521 	return 0;
522 }
523 
524 static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
525 {
526 	struct mii_dev *bus = mdio_alloc();
527 
528 	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
529 
530 	/* set enable and clock divider */
531 	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
532 
533 	/*
534 	 * wait for scan logic to settle:
535 	 * the scan time consists of (a) a large fixed component, and (b) a
536 	 * small component that varies with the mii bus frequency.  These
537 	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
538 	 * silicon.  Since the effect of (b) was found to be largely
539 	 * negligible, we keep things simple here.
540 	 */
541 	udelay(1000);
542 
543 	bus->read = cpsw_mdio_read;
544 	bus->write = cpsw_mdio_write;
545 	strcpy(bus->name, name);
546 
547 	mdio_register(bus);
548 }
549 
550 /* Set a self-clearing bit in a register, and wait for it to clear */
551 static inline void setbit_and_wait_for_clear32(void *addr)
552 {
553 	__raw_writel(CLEAR_BIT, addr);
554 	while (__raw_readl(addr) & CLEAR_BIT)
555 		;
556 }
557 
558 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
559 			 ((mac)[2] << 16) | ((mac)[3] << 24))
560 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
561 
562 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
563 			       struct cpsw_priv *priv)
564 {
565 	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
566 	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
567 }
568 
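/*
 * Query the PHY via phy_startup() and program the sliver mac_control to
 * match the negotiated link (gigabit, full-duplex and MII-enable bits);
 * the register is only rewritten when the value actually changes.
 */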
569 static void cpsw_slave_update_link(struct cpsw_slave *slave,
570 				   struct cpsw_priv *priv, int *link)
571 {
572 	struct phy_device *phy;
573 	u32 mac_control = 0;
574 
575 	phy = priv->phydev;
576 
577 	if (!phy)
578 		return;
579 
580 	phy_startup(phy);
581 	*link = phy->link;
582 
583 	if (*link) { /* link up */
584 		mac_control = priv->data.mac_control;
585 		if (phy->speed == 1000)
586 			mac_control |= GIGABITEN;
587 		if (phy->duplex == DUPLEX_FULL)
588 			mac_control |= FULLDUPLEXEN;
589 		if (phy->speed == 100)
590 			mac_control |= MIIEN;
591 	}
592 
593 	if (mac_control == slave->mac_control)
594 		return;
595 
596 	if (mac_control) {
597 		printf("link up on port %d, speed %d, %s duplex\n",
598 				slave->slave_num, phy->speed,
599 				(phy->duplex == DUPLEX_FULL) ? "full" : "half");
600 	} else {
601 		printf("link down on port %d\n", slave->slave_num);
602 	}
603 
604 	__raw_writel(mac_control, &slave->sliver->mac_control);
605 	slave->mac_control = mac_control;
606 }
607 
608 static int cpsw_update_link(struct cpsw_priv *priv)
609 {
610 	int link = 0;
611 	struct cpsw_slave *slave;
612 
613 	for_active_slave(slave, priv)
614 		cpsw_slave_update_link(slave, priv, &link);
615 
616 	return link;
617 }
618 
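/* With the host on port 0, slave n sits on switch port n + 1 */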
619 static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
620 {
621 	if (priv->host_port == 0)
622 		return slave_num + 1;
623 	else
624 		return slave_num;
625 }
626 
627 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
628 {
629 	u32     slave_port;
630 
631 	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
632 
633 	/* setup priority mapping */
634 	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
635 	__raw_writel(0x33221100, &slave->regs->tx_pri_map);
636 
637 	/* setup max packet size, and mac address */
638 	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
639 	cpsw_set_slave_mac(slave, priv);
640 
641 	slave->mac_control = 0;	/* no link yet */
642 
643 	/* enable forwarding */
644 	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
645 	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
646 
647 	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);
648 
649 	priv->phy_mask |= 1 << slave->data->phy_addr;
650 }
651 
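/*
 * Free descriptors form a singly linked list threaded through the
 * hw_next field; the pool is set up in cpsw_init().
 */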
652 static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
653 {
654 	struct cpdma_desc *desc = priv->desc_free;
655 
656 	if (desc)
657 		priv->desc_free = desc_read_ptr(desc, hw_next);
658 	return desc;
659 }
660 
661 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
662 {
663 	if (desc) {
664 		desc_write(desc, hw_next, priv->desc_free);
665 		priv->desc_free = desc;
666 	}
667 }
668 
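/*
 * Queue a buffer on a DMA channel: fill in a descriptor, chain it to the
 * channel tail and (re)write the head descriptor pointer if the queue was
 * empty or the engine has already signalled EOQ.  For the rx channel a
 * buffer credit is also returned through the rxfree register.
 */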
669 static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
670 			void *buffer, int len)
671 {
672 	struct cpdma_desc *desc, *prev;
673 	u32 mode;
674 
675 	desc = cpdma_desc_alloc(priv);
676 	if (!desc)
677 		return -ENOMEM;
678 
679 	if (len < PKT_MIN)
680 		len = PKT_MIN;
681 
682 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
683 
684 	desc_write(desc, hw_next,   0);
685 	desc_write(desc, hw_buffer, buffer);
686 	desc_write(desc, hw_len,    len);
687 	desc_write(desc, hw_mode,   mode | len);
688 	desc_write(desc, sw_buffer, buffer);
689 	desc_write(desc, sw_len,    len);
690 
691 	if (!chan->head) {
692 		/* simple case - first packet enqueued */
693 		chan->head = desc;
694 		chan->tail = desc;
695 		chan_write(chan, hdp, desc);
696 		goto done;
697 	}
698 
699 	/* not the first packet - enqueue at the tail */
700 	prev = chan->tail;
701 	desc_write(prev, hw_next, desc);
702 	chan->tail = desc;
703 
704 	/* next check if EOQ has been triggered already */
705 	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
706 		chan_write(chan, hdp, desc);
707 
708 done:
709 	if (chan->rxfree)
710 		chan_write(chan, rxfree, 1);
711 	return 0;
712 }
713 
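/*
 * Reap the descriptor at the head of a channel.  Returns -ENOENT when the
 * queue is empty and -EBUSY while the hardware still owns the head
 * descriptor (restarting the channel if it has stalled); otherwise the
 * completed buffer and length are handed back, the descriptor is acked
 * through the completion pointer and returned to the free pool.
 */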
714 static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
715 			 void **buffer, int *len)
716 {
717 	struct cpdma_desc *desc = chan->head;
718 	u32 status;
719 
720 	if (!desc)
721 		return -ENOENT;
722 
723 	status = desc_read(desc, hw_mode);
724 
725 	if (len)
726 		*len = status & 0x7ff;
727 
728 	if (buffer)
729 		*buffer = desc_read_ptr(desc, sw_buffer);
730 
731 	if (status & CPDMA_DESC_OWNER) {
732 		if (chan_read(chan, hdp) == 0) {
733 			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
734 				chan_write(chan, hdp, desc);
735 		}
736 
737 		return -EBUSY;
738 	}
739 
740 	chan->head = desc_read_ptr(desc, hw_next);
741 	chan_write(chan, cp, desc);
742 
743 	cpdma_desc_free(priv, desc);
744 	return 0;
745 }
746 
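/*
 * Bring the switch up: reset the controller, program the ALE and host
 * port, initialize the active slave, build the descriptor pool, reset
 * and start both CPDMA directions and pre-queue the rx buffers.
 */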
747 static int cpsw_init(struct eth_device *dev, bd_t *bis)
748 {
749 	struct cpsw_priv	*priv = dev->priv;
750 	struct cpsw_slave	*slave;
751 	int i, ret;
752 
753 	/* soft reset the controller and initialize priv */
754 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
755 
756 	/* initialize and reset the address lookup engine */
757 	cpsw_ale_enable(priv, 1);
758 	cpsw_ale_clear(priv, 1);
759 	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
760 
761 	/* setup host port priority mapping */
762 	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
763 	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
764 
765 	/* disable priority elevation */
766 	__raw_writel(0, &priv->regs->ptype);
767 
768 	/* enable statistics collection on all ports (0x7 = host + both slaves) */
769 	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
770 	__raw_writel(0x7, &priv->regs->stat_port_en);
771 
772 	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
773 
774 	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
775 			   ALE_SECURE);
776 	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);
777 
778 	for_active_slave(slave, priv)
779 		cpsw_slave_init(slave, priv);
780 
781 	cpsw_update_link(priv);
782 
783 	/* init descriptor pool */
784 	for (i = 0; i < NUM_DESCS; i++) {
785 		desc_write(&priv->descs[i], hw_next,
786 			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
787 	}
788 	priv->desc_free = &priv->descs[0];
789 
790 	/* initialize channels */
791 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
792 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
793 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER2;
794 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER2;
795 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
796 
797 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
798 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER2;
799 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER2;
800 	} else {
801 		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
802 		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER1;
803 		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER1;
804 		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;
805 
806 		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
807 		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER1;
808 		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER1;
809 	}
810 
811 	/* clear dma state */
812 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
813 
814 	if (priv->data.version == CPSW_CTRL_VERSION_2) {
815 		for (i = 0; i < priv->data.channels; i++) {
816 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
817 					* i);
818 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
819 					* i);
820 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
821 					* i);
822 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
823 					* i);
824 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
825 					* i);
826 		}
827 	} else {
828 		for (i = 0; i < priv->data.channels; i++) {
829 			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
830 					* i);
831 			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
832 					* i);
833 			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
834 					* i);
835 			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
836 					* i);
837 			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
838 					* i);
839 
840 		}
841 	}
842 
843 	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
844 	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
845 
846 	/* submit rx descs */
847 	for (i = 0; i < PKTBUFSRX; i++) {
848 		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
849 				   PKTSIZE);
850 		if (ret < 0) {
851 			printf("error %d submitting rx desc\n", ret);
852 			break;
853 		}
854 	}
855 
856 	return 0;
857 }
858 
859 static void cpsw_halt(struct eth_device *dev)
860 {
861 	struct cpsw_priv	*priv = dev->priv;
862 
863 	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
864 	writel(0, priv->dma_regs + CPDMA_RXCONTROL);
865 
866 	/* soft reset the controller */
867 	setbit_and_wait_for_clear32(&priv->regs->soft_reset);
868 
869 	/* clear dma state */
870 	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
871 
872 	priv->data.control(0);
873 }
874 
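/*
 * Transmit one packet: flush it from the data cache, reap any completed
 * tx descriptors, then queue the new buffer.
 */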
875 static int cpsw_send(struct eth_device *dev, void *packet, int length)
876 {
877 	struct cpsw_priv	*priv = dev->priv;
878 	void *buffer;
879 	int len;
880 	int timeout = CPDMA_TIMEOUT;
881 
882 	flush_dcache_range((unsigned long)packet,
883 			   (unsigned long)packet + length);
884 
885 	/* first reap completed packets */
886 	while (timeout-- &&
887 		(cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
888 		;
889 
890 	if (timeout == -1) {
891 		printf("cpdma_process timeout\n");
892 		return -ETIMEDOUT;
893 	}
894 
895 	return cpdma_submit(priv, &priv->tx_chan, packet, length);
896 }
897 
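/*
 * Drain the rx channel: invalidate each completed buffer, hand it to the
 * network stack and immediately resubmit it to the hardware.
 */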
898 static int cpsw_recv(struct eth_device *dev)
899 {
900 	struct cpsw_priv	*priv = dev->priv;
901 	void *buffer;
902 	int len;
903 
904 	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
905 		invalidate_dcache_range((unsigned long)buffer,
906 					(unsigned long)buffer + PKTSIZE_ALIGN);
907 		NetReceive(buffer, len);
908 		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
909 	}
910 
911 	return 0;
912 }
913 
914 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
915 			    struct cpsw_priv *priv)
916 {
917 	void			*regs = priv->regs;
918 	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;
919 	slave->slave_num = slave_num;
920 	slave->data	= data;
921 	slave->regs	= regs + data->slave_reg_ofs;
922 	slave->sliver	= regs + data->sliver_reg_ofs;
923 }
924 
925 static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
926 {
927 	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
928 	struct phy_device *phydev;
929 	u32 supported = PHY_GBIT_FEATURES;
930 
931 	phydev = phy_connect(priv->bus,
932 			slave->data->phy_addr,
933 			dev,
934 			slave->data->phy_if);
935 
936 	if (!phydev)
937 		return -1;
938 
939 	phydev->supported &= supported;
940 	phydev->advertising = phydev->supported;
941 
942 	priv->phydev = phydev;
943 	phy_config(phydev);
944 
945 	return 1;
946 }
947 
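/*
 * Register a CPSW instance with the network core.  Board code calls this
 * once with a filled-in cpsw_platform_data; a rough sketch (the values
 * below are illustrative only, not taken from any particular board):
 *
 *	static struct cpsw_slave_data slave_data[] = {
 *		{ .slave_reg_ofs = ..., .sliver_reg_ofs = ...,
 *		  .phy_addr = 0, .phy_if = PHY_INTERFACE_MODE_RGMII },
 *	};
 *
 *	static struct cpsw_platform_data cpsw_data = {
 *		.mdio_base	= ...,
 *		.cpsw_base	= ...,
 *		.slaves		= 1,
 *		.slave_data	= slave_data,
 *		.host_port_num	= 0,
 *		.version	= CPSW_CTRL_VERSION_2,
 *		... (register offsets, ale_entries, channels, etc.)
 *	};
 *
 *	cpsw_register(&cpsw_data);
 */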
948 int cpsw_register(struct cpsw_platform_data *data)
949 {
950 	struct cpsw_priv	*priv;
951 	struct cpsw_slave	*slave;
952 	void			*regs = (void *)data->cpsw_base;
953 	struct eth_device	*dev;
954 
955 	dev = calloc(sizeof(*dev), 1);
956 	if (!dev)
957 		return -ENOMEM;
958 
959 	priv = calloc(sizeof(*priv), 1);
960 	if (!priv) {
961 		free(dev);
962 		return -ENOMEM;
963 	}
964 
965 	priv->data = *data;
966 	priv->dev = dev;
967 
968 	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
969 	if (!priv->slaves) {
970 		free(dev);
971 		free(priv);
972 		return -ENOMEM;
973 	}
974 
975 	priv->host_port		= data->host_port_num;
976 	priv->regs		= regs;
977 	priv->host_port_regs	= regs + data->host_port_reg_ofs;
978 	priv->dma_regs		= regs + data->cpdma_reg_ofs;
979 	priv->ale_regs		= regs + data->ale_reg_ofs;
980 	priv->descs		= (void *)regs + data->bd_ram_ofs;
981 
982 	int idx = 0;
983 
984 	for_each_slave(slave, priv) {
985 		cpsw_slave_setup(slave, idx, priv);
986 		idx = idx + 1;
987 	}
988 
989 	strcpy(dev->name, "cpsw");
990 	dev->iobase	= 0;
991 	dev->init	= cpsw_init;
992 	dev->halt	= cpsw_halt;
993 	dev->send	= cpsw_send;
994 	dev->recv	= cpsw_recv;
995 	dev->priv	= priv;
996 
997 	eth_register(dev);
998 
999 	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
1000 	priv->bus = miiphy_get_dev_by_name(dev->name);
1001 	for_active_slave(slave, priv)
1002 		cpsw_phy_init(dev, slave);
1003 
1004 	return 1;
1005 }
1006