/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "common.h"
#include "net.h"
#include "malloc.h"
#include "asm/errno.h"
#include "asm/io.h"
#include "asm/immap_qe.h"
#include "qe.h"
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"

/* Default configuration table; one entry per UEC enabled in the board
 * config (CONFIG_UEC_ETHn). */
static uec_info_t uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)

/* Registered ethernet devices, indexed by controller number.
 * Slots for controllers that were never initialized stay NULL. */
static struct eth_device *devlist[MAXCONTROLLERS];

u16 phy_read (struct uec_mii_info *mii_info, u16 regnum);
void phy_write (struct uec_mii_info *mii_info, u16 regnum, u16 val);

/*
 * Enable the MAC transmitter and/or receiver (MACCFG1 enable bits)
 * for the direction(s) selected by @mode.
 *
 * Returns 0 on success, -EINVAL if @uec is NULL.
 */
static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initial\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

/*
 * Disable the MAC transmitter and/or receiver for the direction(s)
 * selected by @mode.  Mirror image of uec_mac_enable().
 *
 * Returns 0 on success, -EINVAL if @uec is NULL.
 */
static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initial\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

/*
 * Issue the QE GRACEFUL_STOP_TX host command and busy-wait until the
 * UCC event register reports the graceful-stop (UCCE_GRA) event.
 *
 * NOTE(review): the wait loop is unbounded; it relies on the QE always
 * raising UCCE_GRA after the command.
 */
static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event (write-one-to-clear) */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (! (ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

/*
 * Gracefully stop the receiver: clear the acknowledge bit in the Rx
 * global parameter RAM, then re-issue the GRACEFUL_STOP_RX host command
 * until the microcode asserts the acknowledge bit.
 *
 * NOTE(review): unbounded retry loop, same caveat as the Tx stop.
 */
static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: No init rx global parameter\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
				(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));

	uec->grace_stopped_rx = 1;

	return 0;
}

/* Restart a gracefully-stopped transmitter via the QE RESTART_TX command. */
static int uec_restart_tx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

/* Restart a gracefully-stopped receiver via the QE RESTART_RX command. */
static int uec_restart_rx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

/*
 * Bring the controller up for the given direction(s): enable the MAC,
 * enable the UCC fast engine, and restart the RISC microcode if it had
 * been gracefully stopped earlier.
 */
static int uec_open(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
		uec_restart_tx(uec);
	}
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
		uec_restart_rx(uec);
	}

	return 0;
}

/*
 * Quiesce the controller for the given direction(s): gracefully stop
 * the Tx/Rx microcode, then disable the UCC fast engine and the MAC.
 */
static int uec_stop(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}
	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
		uec_graceful_stop_tx(uec);
	}
	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
		uec_graceful_stop_rx(uec);
	}

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

/*
 * Program the MACCFG2 full-duplex bit to match @duplex (DUPLEX_HALF or
 * DUPLEX_FULL).  Any other value leaves the register untouched.
 */
static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initial\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

/*
 * Program MACCFG2 (nibble/byte interface width) and UPSMR (RPM/TBIM/
 * R10M/RMM mode bits) for the requested PHY interface type.
 *
 * Returns 0 on success, -EINVAL if @uec is NULL or @if_mode is not a
 * supported interface.
 */
static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
{
	enet_interface_e enet_if_mode;
	uec_info_t *uec_info;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initial\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (enet_if_mode) {
	case ENET_100_MII:
	case ENET_10_MII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		break;
	case ENET_1000_GMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		break;
	case ENET_1000_TBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_TBIM;
		break;
	case ENET_1000_RTBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= (UPSMR_RPM | UPSMR_TBIM);
		break;
	case ENET_1000_RGMII_RXID:
	case ENET_1000_RGMII_ID:
	case ENET_1000_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_100_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_10_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_RPM | UPSMR_R10M);
		break;
	case ENET_100_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RMM;
		break;
	case ENET_10_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_R10M | UPSMR_RMM);
		break;
	default:
		return -EINVAL;
		break;
	}
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

/*
 * Set the MII management clock divider and wait for the management
 * bus to become idle.  Returns 0 on success, -ETIMEDOUT if the bus
 * stays busy.
 */
static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
uint timeout = 0x1000; 395 u32 miimcfg = 0; 396 397 miimcfg = in_be32(&uec_mii_regs->miimcfg); 398 miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE; 399 out_be32(&uec_mii_regs->miimcfg, miimcfg); 400 401 /* Wait until the bus is free */ 402 while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--); 403 if (timeout <= 0) { 404 printf("%s: The MII Bus is stuck!", __FUNCTION__); 405 return -ETIMEDOUT; 406 } 407 408 return 0; 409 } 410 411 static int init_phy(struct eth_device *dev) 412 { 413 uec_private_t *uec; 414 uec_mii_t *umii_regs; 415 struct uec_mii_info *mii_info; 416 struct phy_info *curphy; 417 int err; 418 419 uec = (uec_private_t *)dev->priv; 420 umii_regs = uec->uec_mii_regs; 421 422 uec->oldlink = 0; 423 uec->oldspeed = 0; 424 uec->oldduplex = -1; 425 426 mii_info = malloc(sizeof(*mii_info)); 427 if (!mii_info) { 428 printf("%s: Could not allocate mii_info", dev->name); 429 return -ENOMEM; 430 } 431 memset(mii_info, 0, sizeof(*mii_info)); 432 433 if (uec->uec_info->uf_info.eth_type == GIGA_ETH) { 434 mii_info->speed = SPEED_1000; 435 } else { 436 mii_info->speed = SPEED_100; 437 } 438 439 mii_info->duplex = DUPLEX_FULL; 440 mii_info->pause = 0; 441 mii_info->link = 1; 442 443 mii_info->advertising = (ADVERTISED_10baseT_Half | 444 ADVERTISED_10baseT_Full | 445 ADVERTISED_100baseT_Half | 446 ADVERTISED_100baseT_Full | 447 ADVERTISED_1000baseT_Full); 448 mii_info->autoneg = 1; 449 mii_info->mii_id = uec->uec_info->phy_address; 450 mii_info->dev = dev; 451 452 mii_info->mdio_read = &uec_read_phy_reg; 453 mii_info->mdio_write = &uec_write_phy_reg; 454 455 uec->mii_info = mii_info; 456 457 qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num); 458 459 if (init_mii_management_configuration(umii_regs)) { 460 printf("%s: The MII Bus is stuck!", dev->name); 461 err = -1; 462 goto bus_fail; 463 } 464 465 /* get info for this PHY */ 466 curphy = uec_get_phy_info(uec->mii_info); 467 if (!curphy) { 468 printf("%s: No PHY found", dev->name); 469 err = -1; 470 goto 
no_phy; 471 } 472 473 mii_info->phyinfo = curphy; 474 475 /* Run the commands which initialize the PHY */ 476 if (curphy->init) { 477 err = curphy->init(uec->mii_info); 478 if (err) 479 goto phy_init_fail; 480 } 481 482 return 0; 483 484 phy_init_fail: 485 no_phy: 486 bus_fail: 487 free(mii_info); 488 return err; 489 } 490 491 static void adjust_link(struct eth_device *dev) 492 { 493 uec_private_t *uec = (uec_private_t *)dev->priv; 494 uec_t *uec_regs; 495 struct uec_mii_info *mii_info = uec->mii_info; 496 497 extern void change_phy_interface_mode(struct eth_device *dev, 498 enet_interface_e mode); 499 uec_regs = uec->uec_regs; 500 501 if (mii_info->link) { 502 /* Now we make sure that we can be in full duplex mode. 503 * If not, we operate in half-duplex mode. */ 504 if (mii_info->duplex != uec->oldduplex) { 505 if (!(mii_info->duplex)) { 506 uec_set_mac_duplex(uec, DUPLEX_HALF); 507 printf("%s: Half Duplex\n", dev->name); 508 } else { 509 uec_set_mac_duplex(uec, DUPLEX_FULL); 510 printf("%s: Full Duplex\n", dev->name); 511 } 512 uec->oldduplex = mii_info->duplex; 513 } 514 515 if (mii_info->speed != uec->oldspeed) { 516 if (uec->uec_info->uf_info.eth_type == GIGA_ETH) { 517 switch (mii_info->speed) { 518 case 1000: 519 break; 520 case 100: 521 printf ("switching to rgmii 100\n"); 522 /* change phy to rgmii 100 */ 523 change_phy_interface_mode(dev, 524 ENET_100_RGMII); 525 /* change the MAC interface mode */ 526 uec_set_mac_if_mode(uec,ENET_100_RGMII); 527 break; 528 case 10: 529 printf ("switching to rgmii 10\n"); 530 /* change phy to rgmii 10 */ 531 change_phy_interface_mode(dev, 532 ENET_10_RGMII); 533 /* change the MAC interface mode */ 534 uec_set_mac_if_mode(uec,ENET_10_RGMII); 535 break; 536 default: 537 printf("%s: Ack,Speed(%d)is illegal\n", 538 dev->name, mii_info->speed); 539 break; 540 } 541 } 542 543 printf("%s: Speed %dBT\n", dev->name, mii_info->speed); 544 uec->oldspeed = mii_info->speed; 545 } 546 547 if (!uec->oldlink) { 548 printf("%s: Link is 
up\n", dev->name); 549 uec->oldlink = 1; 550 } 551 552 } else { /* if (mii_info->link) */ 553 if (uec->oldlink) { 554 printf("%s: Link is down\n", dev->name); 555 uec->oldlink = 0; 556 uec->oldspeed = 0; 557 uec->oldduplex = -1; 558 } 559 } 560 } 561 562 static void phy_change(struct eth_device *dev) 563 { 564 uec_private_t *uec = (uec_private_t *)dev->priv; 565 566 /* Update the link, speed, duplex */ 567 uec->mii_info->phyinfo->read_status(uec->mii_info); 568 569 /* Adjust the interface according to speed */ 570 adjust_link(dev); 571 } 572 573 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) \ 574 && !defined(BITBANGMII) 575 576 /* 577 * Find a device index from the devlist by name 578 * 579 * Returns: 580 * The index where the device is located, -1 on error 581 */ 582 static int uec_miiphy_find_dev_by_name(char *devname) 583 { 584 int i; 585 586 for (i = 0; i < MAXCONTROLLERS; i++) { 587 if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) { 588 break; 589 } 590 } 591 592 /* If device cannot be found, returns -1 */ 593 if (i == MAXCONTROLLERS) { 594 debug ("%s: device %s not found in devlist\n", __FUNCTION__, devname); 595 i = -1; 596 } 597 598 return i; 599 } 600 601 /* 602 * Read a MII PHY register. 603 * 604 * Returns: 605 * 0 on success 606 */ 607 static int uec_miiphy_read(char *devname, unsigned char addr, 608 unsigned char reg, unsigned short *value) 609 { 610 int devindex = 0; 611 612 if (devname == NULL || value == NULL) { 613 debug("%s: NULL pointer given\n", __FUNCTION__); 614 } else { 615 devindex = uec_miiphy_find_dev_by_name(devname); 616 if (devindex >= 0) { 617 *value = uec_read_phy_reg(devlist[devindex], addr, reg); 618 } 619 } 620 return 0; 621 } 622 623 /* 624 * Write a MII PHY register. 
625 * 626 * Returns: 627 * 0 on success 628 */ 629 static int uec_miiphy_write(char *devname, unsigned char addr, 630 unsigned char reg, unsigned short value) 631 { 632 int devindex = 0; 633 634 if (devname == NULL) { 635 debug("%s: NULL pointer given\n", __FUNCTION__); 636 } else { 637 devindex = uec_miiphy_find_dev_by_name(devname); 638 if (devindex >= 0) { 639 uec_write_phy_reg(devlist[devindex], addr, reg, value); 640 } 641 } 642 return 0; 643 } 644 #endif 645 646 static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr) 647 { 648 uec_t *uec_regs; 649 u32 mac_addr1; 650 u32 mac_addr2; 651 652 if (!uec) { 653 printf("%s: uec not initial\n", __FUNCTION__); 654 return -EINVAL; 655 } 656 657 uec_regs = uec->uec_regs; 658 659 /* if a station address of 0x12345678ABCD, perform a write to 660 MACSTNADDR1 of 0xCDAB7856, 661 MACSTNADDR2 of 0x34120000 */ 662 663 mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \ 664 (mac_addr[3] << 8) | (mac_addr[2]); 665 out_be32(&uec_regs->macstnaddr1, mac_addr1); 666 667 mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000; 668 out_be32(&uec_regs->macstnaddr2, mac_addr2); 669 670 return 0; 671 } 672 673 static int uec_convert_threads_num(uec_num_of_threads_e threads_num, 674 int *threads_num_ret) 675 { 676 int num_threads_numerica; 677 678 switch (threads_num) { 679 case UEC_NUM_OF_THREADS_1: 680 num_threads_numerica = 1; 681 break; 682 case UEC_NUM_OF_THREADS_2: 683 num_threads_numerica = 2; 684 break; 685 case UEC_NUM_OF_THREADS_4: 686 num_threads_numerica = 4; 687 break; 688 case UEC_NUM_OF_THREADS_6: 689 num_threads_numerica = 6; 690 break; 691 case UEC_NUM_OF_THREADS_8: 692 num_threads_numerica = 8; 693 break; 694 default: 695 printf("%s: Bad number of threads value.", 696 __FUNCTION__); 697 return -EINVAL; 698 } 699 700 *threads_num_ret = num_threads_numerica; 701 702 return 0; 703 } 704 705 static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx) 706 { 707 uec_info_t *uec_info; 708 
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				 UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR: send queue descriptor, also placed in MURAM */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				 UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring: base and last BD address.
	 * NOTE(review): assumes uec->p_tx_bd_ring was already allocated
	 * by uec_startup() before this is called. */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
						 end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset: no IP header offsets used */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* VTAG table: no VLAN tag insertion */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* TQPTR: per-thread Tx data; a single thread gets 32 extra bytes */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

/*
 * Allocate and initialize the global Rx parameter RAM page, the
 * per-thread Rx data areas and the Rx BD queue table in MURAM.
 */
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init global Rx parameter RAM */
	/* REMODER, Extended feature mode disable, VLAN disable,
	 LossLess flow control disable, Receive firmware statisic disable,
	 Extended address parsing mode disable, One Rx queues,
	 Dynamic maximum/minimum frame length disable, IP checksum check
	 disable, IP address alignment disable
	*/
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR: per-thread Rx data areas */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			 UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR: maximum Rx buffer length */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR: Rx BD queue table plus prefetched-BD area */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				 UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
					sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

/*
 * Build the INIT_TX_RX command parameter page in MURAM (magic values,
 * Rx/Tx global parameter pointers and per-thread entries with snums)
 * and issue the QE_INIT_TX_RX host command.
 *
 * Returns 0 on success, -ENOMEM when no snum can be obtained.
 * NOTE(review): snums and MURAM allocated before a failure are not
 * released on the error path.
 */
static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					 int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->risc_rx;

	/* Init Rx threads: entry 0 reuses the global page (offset 0) */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i==0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					 UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0)	{
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
						 UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	/* Make sure all parameter RAM writes have completed before the
	 * QE reads them */
	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
						 init_enet_param_offset);

	return 0;
}

/*
 * One-time hardware bring-up for a UEC: validate the configuration,
 * initialize the UCC fast engine and MAC registers, allocate and wire
 * the Tx/Rx BD rings and buffers, and issue the INIT_TX_RX command.
 *
 * Returns 0 on success, -EINVAL on bad configuration, -ENOMEM on
 * resource allocation failure.
 */
static int uec_startup(uec_private_t *uec)
{
	uec_info_t *uec_info;
	ucc_fast_info_t *uf_info;
	ucc_fast_private_t *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	enet_interface_e enet_interface;
	u32 length;
	u32 align;
	qe_bd_t *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initial\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &(uec_info->uf_info);

	/* Check if Rx BD ring len is illegal */
	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be mutliple of 128.\n",
			 __FUNCTION__);
		return -EINVAL;
	}
/* Both Rx and Tx are stopped */ 1012 uec->grace_stopped_rx = 1; 1013 uec->grace_stopped_tx = 1; 1014 1015 /* Init UCC fast */ 1016 if (ucc_fast_init(uf_info, &uccf)) { 1017 printf("%s: failed to init ucc fast\n", __FUNCTION__); 1018 return -ENOMEM; 1019 } 1020 1021 /* Save uccf */ 1022 uec->uccf = uccf; 1023 1024 /* Convert the Tx threads number */ 1025 if (uec_convert_threads_num(uec_info->num_threads_tx, 1026 &num_threads_tx)) { 1027 return -EINVAL; 1028 } 1029 1030 /* Convert the Rx threads number */ 1031 if (uec_convert_threads_num(uec_info->num_threads_rx, 1032 &num_threads_rx)) { 1033 return -EINVAL; 1034 } 1035 1036 uf_regs = uccf->uf_regs; 1037 1038 /* UEC register is following UCC fast registers */ 1039 uec_regs = (uec_t *)(&uf_regs->ucc_eth); 1040 1041 /* Save the UEC register pointer to UEC private struct */ 1042 uec->uec_regs = uec_regs; 1043 1044 /* Init UPSMR, enable hardware statistics (UCC) */ 1045 out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE); 1046 1047 /* Init MACCFG1, flow control disable, disable Tx and Rx */ 1048 out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE); 1049 1050 /* Init MACCFG2, length check, MAC PAD and CRC enable */ 1051 out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE); 1052 1053 /* Setup MAC interface mode */ 1054 uec_set_mac_if_mode(uec, uec_info->enet_interface); 1055 1056 /* Setup MII management base */ 1057 #ifndef CONFIG_eTSEC_MDIO_BUS 1058 uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg); 1059 #else 1060 uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS; 1061 #endif 1062 1063 /* Setup MII master clock source */ 1064 qe_set_mii_clk_src(uec_info->uf_info.ucc_num); 1065 1066 /* Setup UTBIPAR */ 1067 utbipar = in_be32(&uec_regs->utbipar); 1068 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; 1069 enet_interface = uec->uec_info->enet_interface; 1070 if (enet_interface == ENET_1000_TBI || 1071 enet_interface == ENET_1000_RTBI) { 1072 utbipar |= (uec_info->phy_address + uec_info->uf_info.ucc_num) 1073 << 
UTBIPAR_PHY_ADDRESS_SHIFT; 1074 } else { 1075 utbipar |= (0x10 + uec_info->uf_info.ucc_num) 1076 << UTBIPAR_PHY_ADDRESS_SHIFT; 1077 } 1078 1079 out_be32(&uec_regs->utbipar, utbipar); 1080 1081 /* Allocate Tx BDs */ 1082 length = ((uec_info->tx_bd_ring_len * SIZEOFBD) / 1083 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) * 1084 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 1085 if ((uec_info->tx_bd_ring_len * SIZEOFBD) % 1086 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) { 1087 length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; 1088 } 1089 1090 align = UEC_TX_BD_RING_ALIGNMENT; 1091 uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align)); 1092 if (uec->tx_bd_ring_offset != 0) { 1093 uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align) 1094 & ~(align - 1)); 1095 } 1096 1097 /* Zero all of Tx BDs */ 1098 memset((void *)(uec->tx_bd_ring_offset), 0, length + align); 1099 1100 /* Allocate Rx BDs */ 1101 length = uec_info->rx_bd_ring_len * SIZEOFBD; 1102 align = UEC_RX_BD_RING_ALIGNMENT; 1103 uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align))); 1104 if (uec->rx_bd_ring_offset != 0) { 1105 uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align) 1106 & ~(align - 1)); 1107 } 1108 1109 /* Zero all of Rx BDs */ 1110 memset((void *)(uec->rx_bd_ring_offset), 0, length + align); 1111 1112 /* Allocate Rx buffer */ 1113 length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN; 1114 align = UEC_RX_DATA_BUF_ALIGNMENT; 1115 uec->rx_buf_offset = (u32)malloc(length + align); 1116 if (uec->rx_buf_offset != 0) { 1117 uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align) 1118 & ~(align - 1)); 1119 } 1120 1121 /* Zero all of the Rx buffer */ 1122 memset((void *)(uec->rx_buf_offset), 0, length + align); 1123 1124 /* Init TxBD ring */ 1125 bd = (qe_bd_t *)uec->p_tx_bd_ring; 1126 uec->txBd = bd; 1127 1128 for (i = 0; i < uec_info->tx_bd_ring_len; i++) { 1129 BD_DATA_CLEAR(bd); 1130 BD_STATUS_SET(bd, 0); 1131 BD_LENGTH_SET(bd, 0); 1132 bd ++; 1133 } 1134 BD_STATUS_SET((--bd), TxBD_WRAP); 1135 1136 /* Init 
RxBD ring */ 1137 bd = (qe_bd_t *)uec->p_rx_bd_ring; 1138 uec->rxBd = bd; 1139 buf = uec->p_rx_buf; 1140 for (i = 0; i < uec_info->rx_bd_ring_len; i++) { 1141 BD_DATA_SET(bd, buf); 1142 BD_LENGTH_SET(bd, 0); 1143 BD_STATUS_SET(bd, RxBD_EMPTY); 1144 buf += MAX_RXBUF_LEN; 1145 bd ++; 1146 } 1147 BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY); 1148 1149 /* Init global Tx parameter RAM */ 1150 uec_init_tx_parameter(uec, num_threads_tx); 1151 1152 /* Init global Rx parameter RAM */ 1153 uec_init_rx_parameter(uec, num_threads_rx); 1154 1155 /* Init ethernet Tx and Rx parameter command */ 1156 if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx, 1157 num_threads_rx)) { 1158 printf("%s issue init enet cmd failed\n", __FUNCTION__); 1159 return -ENOMEM; 1160 } 1161 1162 return 0; 1163 } 1164 1165 static int uec_init(struct eth_device* dev, bd_t *bd) 1166 { 1167 uec_private_t *uec; 1168 int err, i; 1169 struct phy_info *curphy; 1170 1171 uec = (uec_private_t *)dev->priv; 1172 1173 if (uec->the_first_run == 0) { 1174 err = init_phy(dev); 1175 if (err) { 1176 printf("%s: Cannot initialize PHY, aborting.\n", 1177 dev->name); 1178 return err; 1179 } 1180 1181 curphy = uec->mii_info->phyinfo; 1182 1183 if (curphy->config_aneg) { 1184 err = curphy->config_aneg(uec->mii_info); 1185 if (err) { 1186 printf("%s: Can't negotiate PHY\n", dev->name); 1187 return err; 1188 } 1189 } 1190 1191 /* Give PHYs up to 5 sec to report a link */ 1192 i = 50; 1193 do { 1194 err = curphy->read_status(uec->mii_info); 1195 udelay(100000); 1196 } while (((i-- > 0) && !uec->mii_info->link) || err); 1197 1198 if (err || i <= 0) 1199 printf("warning: %s: timeout on PHY link\n", dev->name); 1200 1201 uec->the_first_run = 1; 1202 } 1203 1204 /* Set up the MAC address */ 1205 if (dev->enetaddr[0] & 0x01) { 1206 printf("%s: MacAddress is multcast address\n", 1207 __FUNCTION__); 1208 return -1; 1209 } 1210 uec_set_mac_address(uec, dev->enetaddr); 1211 1212 1213 err = uec_open(uec, COMM_DIR_RX_AND_TX); 1214 if 
(err) { 1215 printf("%s: cannot enable UEC device\n", dev->name); 1216 return -1; 1217 } 1218 1219 phy_change(dev); 1220 1221 return (uec->mii_info->link ? 0 : -1); 1222 } 1223 1224 static void uec_halt(struct eth_device* dev) 1225 { 1226 uec_private_t *uec = (uec_private_t *)dev->priv; 1227 uec_stop(uec, COMM_DIR_RX_AND_TX); 1228 } 1229 1230 static int uec_send(struct eth_device* dev, volatile void *buf, int len) 1231 { 1232 uec_private_t *uec; 1233 ucc_fast_private_t *uccf; 1234 volatile qe_bd_t *bd; 1235 u16 status; 1236 int i; 1237 int result = 0; 1238 1239 uec = (uec_private_t *)dev->priv; 1240 uccf = uec->uccf; 1241 bd = uec->txBd; 1242 1243 /* Find an empty TxBD */ 1244 for (i = 0; bd->status & TxBD_READY; i++) { 1245 if (i > 0x100000) { 1246 printf("%s: tx buffer not ready\n", dev->name); 1247 return result; 1248 } 1249 } 1250 1251 /* Init TxBD */ 1252 BD_DATA_SET(bd, buf); 1253 BD_LENGTH_SET(bd, len); 1254 status = bd->status; 1255 status &= BD_WRAP; 1256 status |= (TxBD_READY | TxBD_LAST); 1257 BD_STATUS_SET(bd, status); 1258 1259 /* Tell UCC to transmit the buffer */ 1260 ucc_fast_transmit_on_demand(uccf); 1261 1262 /* Wait for buffer to be transmitted */ 1263 for (i = 0; bd->status & TxBD_READY; i++) { 1264 if (i > 0x100000) { 1265 printf("%s: tx error\n", dev->name); 1266 return result; 1267 } 1268 } 1269 1270 /* Ok, the buffer be transimitted */ 1271 BD_ADVANCE(bd, status, uec->p_tx_bd_ring); 1272 uec->txBd = bd; 1273 result = 1; 1274 1275 return result; 1276 } 1277 1278 static int uec_recv(struct eth_device* dev) 1279 { 1280 uec_private_t *uec = dev->priv; 1281 volatile qe_bd_t *bd; 1282 u16 status; 1283 u16 len; 1284 u8 *data; 1285 1286 bd = uec->rxBd; 1287 status = bd->status; 1288 1289 while (!(status & RxBD_EMPTY)) { 1290 if (!(status & RxBD_ERROR)) { 1291 data = BD_DATA(bd); 1292 len = BD_LENGTH(bd); 1293 NetReceive(data, len); 1294 } else { 1295 printf("%s: Rx error\n", dev->name); 1296 } 1297 status &= BD_CLEAN; 1298 BD_LENGTH_SET(bd, 0); 1299 
BD_STATUS_SET(bd, status | RxBD_EMPTY); 1300 BD_ADVANCE(bd, status, uec->p_rx_bd_ring); 1301 status = bd->status; 1302 } 1303 uec->rxBd = bd; 1304 1305 return 1; 1306 } 1307 1308 int uec_initialize(bd_t *bis, uec_info_t *uec_info) 1309 { 1310 struct eth_device *dev; 1311 int i; 1312 uec_private_t *uec; 1313 int err; 1314 1315 dev = (struct eth_device *)malloc(sizeof(struct eth_device)); 1316 if (!dev) 1317 return 0; 1318 memset(dev, 0, sizeof(struct eth_device)); 1319 1320 /* Allocate the UEC private struct */ 1321 uec = (uec_private_t *)malloc(sizeof(uec_private_t)); 1322 if (!uec) { 1323 return -ENOMEM; 1324 } 1325 memset(uec, 0, sizeof(uec_private_t)); 1326 1327 /* Adjust uec_info */ 1328 #if (MAX_QE_RISC == 4) 1329 uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS; 1330 uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS; 1331 #endif 1332 1333 devlist[uec_info->uf_info.ucc_num] = dev; 1334 1335 uec->uec_info = uec_info; 1336 1337 sprintf(dev->name, "FSL UEC%d", uec_info->uf_info.ucc_num); 1338 dev->iobase = 0; 1339 dev->priv = (void *)uec; 1340 dev->init = uec_init; 1341 dev->halt = uec_halt; 1342 dev->send = uec_send; 1343 dev->recv = uec_recv; 1344 1345 /* Clear the ethnet address */ 1346 for (i = 0; i < 6; i++) 1347 dev->enetaddr[i] = 0; 1348 1349 eth_register(dev); 1350 1351 err = uec_startup(uec); 1352 if (err) { 1353 printf("%s: Cannot configure net device, aborting.",dev->name); 1354 return err; 1355 } 1356 1357 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) \ 1358 && !defined(BITBANGMII) 1359 miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write); 1360 #endif 1361 1362 return 1; 1363 } 1364 1365 int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num) 1366 { 1367 int i; 1368 1369 for (i = 0; i < num; i++) 1370 uec_initialize(bis, &uecs[i]); 1371 1372 return 0; 1373 } 1374 1375 int uec_standard_init(bd_t *bis) 1376 { 1377 return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info)); 1378 } 1379 1380 1381