/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * These timeouts are worst-case, purely defensive measures against
 * unexpected controller lockups. Ideally, they are never hit in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */
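
/*
 * MDIO controller register layout. PHY reads and writes go through the
 * user[0].access register: set USERACCESS_GO together with the register and
 * PHY addresses (plus USERACCESS_WRITE and the data for writes), then poll
 * until GO clears; USERACCESS_ACK indicates a successful read.
 */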
struct cpsw_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	__reserved_1[20];

	struct {
		u32	access;
		u32	physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
#ifdef CONFIG_AM33XX
	u32	gap_thresh;
#elif defined(CONFIG_TI814X)
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
#endif
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE			0
#define ALE_TYPE_ADDR			1
#define ALE_TYPE_VLAN			2
#define ALE_TYPE_VLAN_ADDR		3

#define ALE_UCAST_PERSISTANT		0
#define ALE_UCAST_UNTOUCHED		1
#define ALE_UCAST_OUI			2
#define ALE_UCAST_TOUCHED		3

#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
};

struct cpdma_desc {
	/* hardware fields */
	u32	hw_next;
	u32	hw_buffer;
	u32	hw_len;
	u32	hw_mode;
	/* software fields */
	u32	sw_buffer;
	u32	sw_len;
};

struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)
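
/*
 * Per-controller driver state: register windows for the switch subsystem,
 * CPDMA and ALE, the descriptor pool and its free list, one RX and one TX
 * channel, the slave (external port) array, and the PHY/MDIO handles.
 */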
struct cpsw_priv {
	struct eth_device		*dev;
	struct cpsw_platform_data	data;
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				mdio_link;
	u32				phy_mask;
};

static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}
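
/*
 * When no matching entry and no free slot exists, fall back to recycling an
 * "ageable" entry: a unicast address that is neither persistent nor an OUI
 * entry, i.e. one the hardware could age out on its own anyway.
 */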
static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	sprintf(bus->name, "%s", name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;

	phy = priv->phydev;

	if (!phy)
		return;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_each_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);
	priv->mdio_link = readl(&mdio_regs->link);
	return link;
}

static int cpsw_check_link(struct cpsw_priv *priv)
{
	u32 link = 0;

	link = __raw_readl(&mdio_regs->link) & priv->phy_mask;
	if ((link) && (link == priv->mdio_link))
		return 1;

	return cpsw_update_link(priv);
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_id;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}
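
/*
 * Queue a buffer on a CPDMA channel. The first descriptor is kicked off by
 * writing its address to the channel head descriptor pointer (HDP); later
 * descriptors are chained onto the tail. If the previous tail already carries
 * the EOQ (end-of-queue) flag, the hardware stopped before seeing the new
 * descriptor, so the HDP is written again to restart the channel.
 */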
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on all ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);

	for_each_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];
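
	/*
	 * Initialize channels: the CPDMA head descriptor pointer (HDP) and
	 * completion pointer (CP) registers sit at different offsets depending
	 * on the controller version given in the platform data, hence the
	 * VER1/VER2 split below; the RX free-buffer count register is common
	 * to both versions.
	 */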
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_check_link(priv))
		return -EIO;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
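
/*
 * Polled receive: reap completed RX descriptors, invalidate the data cache
 * over each buffer before passing it to the network stack, then resubmit the
 * same buffer to the RX queue.
 */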
static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	cpsw_update_link(priv);

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		NetReceive(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
{
	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
	struct phy_device *phydev;
	u32 supported = (SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full);

	phydev = phy_connect(priv->bus,
			     CONFIG_PHY_ADDR,
			     dev,
			     slave->data->phy_if);

	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;
	priv->descs = (void *)regs + data->bd_ram_ofs;

	int idx = 0;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}