/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *	Dave Liu <daveliu@freescale.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>
#include <fsl_memac.h>

#include "fm.h"

/* One entry per FMan port; filled by fm_eth_initialize() as devices register */
static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

/*
 * NOTE(review): && binds tighter than ||, so this reads as
 * CONFIG_MII || (CONFIG_CMD_MII && !BITBANGMII).  If the intent was
 * "(MII or CMD_MII) and not bitbang" the grouping is wrong — confirm
 * against the configs that actually enable BITBANGMII before changing.
 */
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

/* TBI advertisement: pause (both directions) + full duplex */
#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

/* TBI control: enable + restart autoneg, full duplex, 1000 Mbps */
#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/*
 * Configure the TBI (ten-bit interface) PCS for SGMII operation.
 *
 * FMan v3 (mEMAC): program the internal SGMII PCS at MDIO address 0 via
 * the mEMAC's own MDIO controller — IF mode, device ability, link timer,
 * then restart autoneg.  For 2.5G SGMII the autoneg bits are left off.
 *
 * Pre-v3 (dTSEC): program the TBI registers through the TSEC local MDIO
 * at the address held in the TBIPA register.
 */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	/*
	 * Stack-local mii_dev: memac_mdio_write() only reads bus.priv
	 * (the MDIO register block); the rest of the struct is unused here.
	 */
	struct mii_dev bus;
	bus.priv = priv->mac->phyregs;
	bool sgmii_2500 = (priv->enet_if ==
			PHY_INTERFACE_MODE_SGMII_2500) ? true : false;

	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
	value = PHY_SGMII_IF_MODE_SGMII;
	if (!sgmii_2500)
		value |= PHY_SGMII_IF_MODE_AN;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x4, value);

	/* Adjust link timer for SGMII  -
	   1.6 ms in units of 8 ns = 2 * 10^5 = 0x30d40 */
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x13, 0x3);
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x12, 0xd40);

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL;
	if (!sgmii_2500)
		value |= PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0, value);
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
#endif
}

/*
 * Per-dTSEC PHY-side setup: assign the TBI PHY address (pre-v3 only) and,
 * for (2.5G) SGMII interfaces, bring up the internal SerDes/PCS.
 */
static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifndef CONFIG_SYS_FMAN_V3
	/*
	 * NOTE(review): always uses FM1 DTSEC1's register block regardless
	 * of which dTSEC this device is — presumably TBIPA is shared across
	 * the FMan; verify against the SoC manual.
	 */
	struct dtsec *regs = (struct dtsec *)CONFIG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII_2500)
		dtsec_configure_serdes(fm_eth);
}

/*
 * Return non-zero when hwconfig marks this FMan's XAUI PHY as "xfi",
 * i.e. the 10G MAC is wired to a fibre (XFI) interface.
 */
static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif 108 109 static u16 muram_readw(u16 *addr) 110 { 111 ulong base = (ulong)addr & ~0x3UL; 112 u32 val32 = in_be32((void *)base); 113 int byte_pos; 114 u16 ret; 115 116 byte_pos = (ulong)addr & 0x3UL; 117 if (byte_pos) 118 ret = (u16)(val32 & 0x0000ffff); 119 else 120 ret = (u16)((val32 & 0xffff0000) >> 16); 121 122 return ret; 123 } 124 125 static void muram_writew(u16 *addr, u16 val) 126 { 127 ulong base = (ulong)addr & ~0x3UL; 128 u32 org32 = in_be32((void *)base); 129 u32 val32; 130 int byte_pos; 131 132 byte_pos = (ulong)addr & 0x3UL; 133 if (byte_pos) 134 val32 = (org32 & 0xffff0000) | val; 135 else 136 val32 = (org32 & 0x0000ffff) | ((u32)val << 16); 137 138 out_be32((void *)base, val32); 139 } 140 141 static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port) 142 { 143 int timeout = 1000000; 144 145 clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN); 146 147 /* wait until the rx port is not busy */ 148 while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--) 149 ; 150 } 151 152 static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port) 153 { 154 /* set BMI to independent mode, Rx port disable */ 155 out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM); 156 /* clear FOF in IM case */ 157 out_be32(&rx_port->fmbm_rim, 0); 158 /* Rx frame next engine -RISC */ 159 out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX); 160 /* Rx command attribute - no order, MR[3] = 1 */ 161 clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK); 162 setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4)); 163 /* enable Rx statistic counters */ 164 out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN); 165 /* disable Rx performance counters */ 166 out_be32(&rx_port->fmbm_rpc, 0); 167 } 168 169 static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port) 170 { 171 int timeout = 1000000; 172 173 clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN); 174 175 /* wait until the tx port is not busy */ 176 while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--) 
177 ; 178 } 179 180 static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port) 181 { 182 /* set BMI to independent mode, Tx port disable */ 183 out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM); 184 /* Tx frame next engine -RISC */ 185 out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX); 186 out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX); 187 /* Tx command attribute - no order, MR[3] = 1 */ 188 clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK); 189 setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4)); 190 /* enable Tx statistic counters */ 191 out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN); 192 /* disable Tx performance counters */ 193 out_be32(&tx_port->fmbm_tpc, 0); 194 } 195 196 static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth) 197 { 198 struct fm_port_global_pram *pram; 199 u32 pram_page_offset; 200 void *rx_bd_ring_base; 201 void *rx_buf_pool; 202 u32 bd_ring_base_lo, bd_ring_base_hi; 203 u32 buf_lo, buf_hi; 204 struct fm_port_bd *rxbd; 205 struct fm_port_qd *rxqd; 206 struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port; 207 int i; 208 209 /* alloc global parameter ram at MURAM */ 210 pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index, 211 FM_PRAM_SIZE, FM_PRAM_ALIGN); 212 if (!pram) { 213 printf("%s: No muram for Rx global parameter\n", __func__); 214 return -ENOMEM; 215 } 216 217 fm_eth->rx_pram = pram; 218 219 /* parameter page offset to MURAM */ 220 pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index); 221 222 /* enable global mode- snooping data buffers and BDs */ 223 out_be32(&pram->mode, PRAM_MODE_GLOBAL); 224 225 /* init the Rx queue descriptor pionter */ 226 out_be32(&pram->rxqd_ptr, pram_page_offset + 0x20); 227 228 /* set the max receive buffer length, power of 2 */ 229 muram_writew(&pram->mrblr, MAX_RXBUF_LOG2); 230 231 /* alloc Rx buffer descriptors from main memory */ 232 rx_bd_ring_base = malloc(sizeof(struct fm_port_bd) 233 * RX_BD_RING_SIZE); 234 if 
(!rx_bd_ring_base) 235 return -ENOMEM; 236 237 memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd) 238 * RX_BD_RING_SIZE); 239 240 /* alloc Rx buffer from main memory */ 241 rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE); 242 if (!rx_buf_pool) 243 return -ENOMEM; 244 245 memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE); 246 debug("%s: rx_buf_pool = %p\n", __func__, rx_buf_pool); 247 248 /* save them to fm_eth */ 249 fm_eth->rx_bd_ring = rx_bd_ring_base; 250 fm_eth->cur_rxbd = rx_bd_ring_base; 251 fm_eth->rx_buf = rx_buf_pool; 252 253 /* init Rx BDs ring */ 254 rxbd = (struct fm_port_bd *)rx_bd_ring_base; 255 for (i = 0; i < RX_BD_RING_SIZE; i++) { 256 muram_writew(&rxbd->status, RxBD_EMPTY); 257 muram_writew(&rxbd->len, 0); 258 buf_hi = upper_32_bits(virt_to_phys(rx_buf_pool + 259 i * MAX_RXBUF_LEN)); 260 buf_lo = lower_32_bits(virt_to_phys(rx_buf_pool + 261 i * MAX_RXBUF_LEN)); 262 muram_writew(&rxbd->buf_ptr_hi, (u16)buf_hi); 263 out_be32(&rxbd->buf_ptr_lo, buf_lo); 264 rxbd++; 265 } 266 267 /* set the Rx queue descriptor */ 268 rxqd = &pram->rxqd; 269 muram_writew(&rxqd->gen, 0); 270 bd_ring_base_hi = upper_32_bits(virt_to_phys(rx_bd_ring_base)); 271 bd_ring_base_lo = lower_32_bits(virt_to_phys(rx_bd_ring_base)); 272 muram_writew(&rxqd->bd_ring_base_hi, (u16)bd_ring_base_hi); 273 out_be32(&rxqd->bd_ring_base_lo, bd_ring_base_lo); 274 muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd) 275 * RX_BD_RING_SIZE); 276 muram_writew(&rxqd->offset_in, 0); 277 muram_writew(&rxqd->offset_out, 0); 278 279 /* set IM parameter ram pointer to Rx Frame Queue ID */ 280 out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset); 281 282 return 0; 283 } 284 285 static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth) 286 { 287 struct fm_port_global_pram *pram; 288 u32 pram_page_offset; 289 void *tx_bd_ring_base; 290 u32 bd_ring_base_lo, bd_ring_base_hi; 291 struct fm_port_bd *txbd; 292 struct fm_port_qd *txqd; 293 struct fm_bmi_tx_port *bmi_tx_port = 
fm_eth->tx_port; 294 int i; 295 296 /* alloc global parameter ram at MURAM */ 297 pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index, 298 FM_PRAM_SIZE, FM_PRAM_ALIGN); 299 if (!pram) { 300 printf("%s: No muram for Tx global parameter\n", __func__); 301 return -ENOMEM; 302 } 303 fm_eth->tx_pram = pram; 304 305 /* parameter page offset to MURAM */ 306 pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index); 307 308 /* enable global mode- snooping data buffers and BDs */ 309 out_be32(&pram->mode, PRAM_MODE_GLOBAL); 310 311 /* init the Tx queue descriptor pionter */ 312 out_be32(&pram->txqd_ptr, pram_page_offset + 0x40); 313 314 /* alloc Tx buffer descriptors from main memory */ 315 tx_bd_ring_base = malloc(sizeof(struct fm_port_bd) 316 * TX_BD_RING_SIZE); 317 if (!tx_bd_ring_base) 318 return -ENOMEM; 319 320 memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd) 321 * TX_BD_RING_SIZE); 322 /* save it to fm_eth */ 323 fm_eth->tx_bd_ring = tx_bd_ring_base; 324 fm_eth->cur_txbd = tx_bd_ring_base; 325 326 /* init Tx BDs ring */ 327 txbd = (struct fm_port_bd *)tx_bd_ring_base; 328 for (i = 0; i < TX_BD_RING_SIZE; i++) { 329 muram_writew(&txbd->status, TxBD_LAST); 330 muram_writew(&txbd->len, 0); 331 muram_writew(&txbd->buf_ptr_hi, 0); 332 out_be32(&txbd->buf_ptr_lo, 0); 333 txbd++; 334 } 335 336 /* set the Tx queue decriptor */ 337 txqd = &pram->txqd; 338 bd_ring_base_hi = upper_32_bits(virt_to_phys(tx_bd_ring_base)); 339 bd_ring_base_lo = lower_32_bits(virt_to_phys(tx_bd_ring_base)); 340 muram_writew(&txqd->bd_ring_base_hi, (u16)bd_ring_base_hi); 341 out_be32(&txqd->bd_ring_base_lo, bd_ring_base_lo); 342 muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd) 343 * TX_BD_RING_SIZE); 344 muram_writew(&txqd->offset_in, 0); 345 muram_writew(&txqd->offset_out, 0); 346 347 /* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */ 348 out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset); 349 350 return 0; 351 } 352 353 static int 
fm_eth_init(struct fm_eth *fm_eth) 354 { 355 int ret; 356 357 ret = fm_eth_rx_port_parameter_init(fm_eth); 358 if (ret) 359 return ret; 360 361 ret = fm_eth_tx_port_parameter_init(fm_eth); 362 if (ret) 363 return ret; 364 365 return 0; 366 } 367 368 static int fm_eth_startup(struct fm_eth *fm_eth) 369 { 370 struct fsl_enet_mac *mac; 371 int ret; 372 373 mac = fm_eth->mac; 374 375 /* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */ 376 ret = fm_eth_init(fm_eth); 377 if (ret) 378 return ret; 379 /* setup the MAC controller */ 380 mac->init_mac(mac); 381 382 /* For some reason we need to set SPEED_100 */ 383 if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) || 384 (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) && 385 mac->set_if_mode) 386 mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100); 387 388 /* init bmi rx port, IM mode and disable */ 389 bmi_rx_port_init(fm_eth->rx_port); 390 /* init bmi tx port, IM mode and disable */ 391 bmi_tx_port_init(fm_eth->tx_port); 392 393 return 0; 394 } 395 396 static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth) 397 { 398 struct fm_port_global_pram *pram; 399 400 pram = fm_eth->tx_pram; 401 /* graceful stop transmission of frames */ 402 setbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP); 403 sync(); 404 } 405 406 static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth) 407 { 408 struct fm_port_global_pram *pram; 409 410 pram = fm_eth->tx_pram; 411 /* re-enable transmission of frames */ 412 clrbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP); 413 sync(); 414 } 415 416 static int fm_eth_open(struct eth_device *dev, bd_t *bd) 417 { 418 struct fm_eth *fm_eth; 419 struct fsl_enet_mac *mac; 420 #ifdef CONFIG_PHYLIB 421 int ret; 422 #endif 423 424 fm_eth = (struct fm_eth *)dev->priv; 425 mac = fm_eth->mac; 426 427 /* setup the MAC address */ 428 if (dev->enetaddr[0] & 0x01) { 429 printf("%s: MacAddress is multcast address\n", __func__); 430 return 1; 431 } 432 mac->set_mac_addr(mac, dev->enetaddr); 433 434 
/* enable bmi Rx port */ 435 setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN); 436 /* enable MAC rx/tx port */ 437 mac->enable_mac(mac); 438 /* enable bmi Tx port */ 439 setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN); 440 /* re-enable transmission of frame */ 441 fmc_tx_port_graceful_stop_disable(fm_eth); 442 443 #ifdef CONFIG_PHYLIB 444 if (fm_eth->phydev) { 445 ret = phy_startup(fm_eth->phydev); 446 if (ret) { 447 printf("%s: Could not initialize\n", 448 fm_eth->phydev->dev->name); 449 return ret; 450 } 451 } else { 452 return 0; 453 } 454 #else 455 fm_eth->phydev->speed = SPEED_1000; 456 fm_eth->phydev->link = 1; 457 fm_eth->phydev->duplex = DUPLEX_FULL; 458 #endif 459 460 /* set the MAC-PHY mode */ 461 mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed); 462 463 if (!fm_eth->phydev->link) 464 printf("%s: No link.\n", fm_eth->phydev->dev->name); 465 466 return fm_eth->phydev->link ? 0 : -1; 467 } 468 469 static void fm_eth_halt(struct eth_device *dev) 470 { 471 struct fm_eth *fm_eth; 472 struct fsl_enet_mac *mac; 473 474 fm_eth = (struct fm_eth *)dev->priv; 475 mac = fm_eth->mac; 476 477 /* graceful stop the transmission of frames */ 478 fmc_tx_port_graceful_stop_enable(fm_eth); 479 /* disable bmi Tx port */ 480 bmi_tx_port_disable(fm_eth->tx_port); 481 /* disable MAC rx/tx port */ 482 mac->disable_mac(mac); 483 /* disable bmi Rx port */ 484 bmi_rx_port_disable(fm_eth->rx_port); 485 486 if (fm_eth->phydev) 487 phy_shutdown(fm_eth->phydev); 488 } 489 490 static int fm_eth_send(struct eth_device *dev, void *buf, int len) 491 { 492 struct fm_eth *fm_eth; 493 struct fm_port_global_pram *pram; 494 struct fm_port_bd *txbd, *txbd_base; 495 u16 offset_in; 496 int i; 497 498 fm_eth = (struct fm_eth *)dev->priv; 499 pram = fm_eth->tx_pram; 500 txbd = fm_eth->cur_txbd; 501 502 /* find one empty TxBD */ 503 for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) { 504 udelay(100); 505 if (i > 0x1000) { 506 printf("%s: Tx buffer not ready, 
txbd->status = 0x%x\n", 507 dev->name, muram_readw(&txbd->status)); 508 return 0; 509 } 510 } 511 /* setup TxBD */ 512 muram_writew(&txbd->buf_ptr_hi, (u16)upper_32_bits(virt_to_phys(buf))); 513 out_be32(&txbd->buf_ptr_lo, lower_32_bits(virt_to_phys(buf))); 514 muram_writew(&txbd->len, len); 515 sync(); 516 muram_writew(&txbd->status, TxBD_READY | TxBD_LAST); 517 sync(); 518 519 /* update TxQD, let RISC to send the packet */ 520 offset_in = muram_readw(&pram->txqd.offset_in); 521 offset_in += sizeof(struct fm_port_bd); 522 if (offset_in >= muram_readw(&pram->txqd.bd_ring_size)) 523 offset_in = 0; 524 muram_writew(&pram->txqd.offset_in, offset_in); 525 sync(); 526 527 /* wait for buffer to be transmitted */ 528 for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) { 529 udelay(100); 530 if (i > 0x10000) { 531 printf("%s: Tx error, txbd->status = 0x%x\n", 532 dev->name, muram_readw(&txbd->status)); 533 return 0; 534 } 535 } 536 537 /* advance the TxBD */ 538 txbd++; 539 txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring; 540 if (txbd >= (txbd_base + TX_BD_RING_SIZE)) 541 txbd = txbd_base; 542 /* update current txbd */ 543 fm_eth->cur_txbd = (void *)txbd; 544 545 return 1; 546 } 547 548 static int fm_eth_recv(struct eth_device *dev) 549 { 550 struct fm_eth *fm_eth; 551 struct fm_port_global_pram *pram; 552 struct fm_port_bd *rxbd, *rxbd_base; 553 u16 status, len; 554 u32 buf_lo, buf_hi; 555 u8 *data; 556 u16 offset_out; 557 int ret = 1; 558 559 fm_eth = (struct fm_eth *)dev->priv; 560 pram = fm_eth->rx_pram; 561 rxbd = fm_eth->cur_rxbd; 562 status = muram_readw(&rxbd->status); 563 564 while (!(status & RxBD_EMPTY)) { 565 if (!(status & RxBD_ERROR)) { 566 buf_hi = muram_readw(&rxbd->buf_ptr_hi); 567 buf_lo = in_be32(&rxbd->buf_ptr_lo); 568 data = (u8 *)((ulong)(buf_hi << 16) << 16 | buf_lo); 569 len = muram_readw(&rxbd->len); 570 net_process_received_packet(data, len); 571 } else { 572 printf("%s: Rx error\n", dev->name); 573 ret = 0; 574 } 575 576 /* clear the 
RxBDs */ 577 muram_writew(&rxbd->status, RxBD_EMPTY); 578 muram_writew(&rxbd->len, 0); 579 sync(); 580 581 /* advance RxBD */ 582 rxbd++; 583 rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring; 584 if (rxbd >= (rxbd_base + RX_BD_RING_SIZE)) 585 rxbd = rxbd_base; 586 /* read next status */ 587 status = muram_readw(&rxbd->status); 588 589 /* update RxQD */ 590 offset_out = muram_readw(&pram->rxqd.offset_out); 591 offset_out += sizeof(struct fm_port_bd); 592 if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size)) 593 offset_out = 0; 594 muram_writew(&pram->rxqd.offset_out, offset_out); 595 sync(); 596 } 597 fm_eth->cur_rxbd = (void *)rxbd; 598 599 return ret; 600 } 601 602 static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg) 603 { 604 struct fsl_enet_mac *mac; 605 int num; 606 void *base, *phyregs = NULL; 607 608 num = fm_eth->num; 609 610 #ifdef CONFIG_SYS_FMAN_V3 611 #ifndef CONFIG_FSL_FM_10GEC_REGULAR_NOTATION 612 if (fm_eth->type == FM_ETH_10G_E) { 613 /* 10GEC1/10GEC2 use mEMAC9/mEMAC10 on T2080/T4240. 614 * 10GEC3/10GEC4 use mEMAC1/mEMAC2 on T2080. 615 * 10GEC1 uses mEMAC1 on T1024. 616 * so it needs to change the num. 
617 */ 618 if (fm_eth->num >= 2) 619 num -= 2; 620 else 621 num += 8; 622 } 623 #endif 624 base = ®->memac[num].fm_memac; 625 phyregs = ®->memac[num].fm_memac_mdio; 626 #else 627 /* Get the mac registers base address */ 628 if (fm_eth->type == FM_ETH_1G_E) { 629 base = ®->mac_1g[num].fm_dtesc; 630 phyregs = ®->mac_1g[num].fm_mdio.miimcfg; 631 } else { 632 base = ®->mac_10g[num].fm_10gec; 633 phyregs = ®->mac_10g[num].fm_10gec_mdio; 634 } 635 #endif 636 637 /* alloc mac controller */ 638 mac = malloc(sizeof(struct fsl_enet_mac)); 639 if (!mac) 640 return -ENOMEM; 641 memset(mac, 0, sizeof(struct fsl_enet_mac)); 642 643 /* save the mac to fm_eth struct */ 644 fm_eth->mac = mac; 645 646 #ifdef CONFIG_SYS_FMAN_V3 647 init_memac(mac, base, phyregs, MAX_RXBUF_LEN); 648 #else 649 if (fm_eth->type == FM_ETH_1G_E) 650 init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN); 651 else 652 init_tgec(mac, base, phyregs, MAX_RXBUF_LEN); 653 #endif 654 655 return 0; 656 } 657 658 static int init_phy(struct eth_device *dev) 659 { 660 struct fm_eth *fm_eth = dev->priv; 661 struct phy_device *phydev = NULL; 662 u32 supported; 663 664 #ifdef CONFIG_PHYLIB 665 if (fm_eth->type == FM_ETH_1G_E) 666 dtsec_init_phy(dev); 667 668 if (fm_eth->bus) { 669 phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev, 670 fm_eth->enet_if); 671 if (!phydev) { 672 printf("Failed to connect\n"); 673 return -1; 674 } 675 } else { 676 return 0; 677 } 678 679 if (fm_eth->type == FM_ETH_1G_E) { 680 supported = (SUPPORTED_10baseT_Half | 681 SUPPORTED_10baseT_Full | 682 SUPPORTED_100baseT_Half | 683 SUPPORTED_100baseT_Full | 684 SUPPORTED_1000baseT_Full); 685 } else { 686 supported = SUPPORTED_10000baseT_Full; 687 688 if (tgec_is_fibre(dev)) 689 phydev->port = PORT_FIBRE; 690 } 691 692 phydev->supported &= supported; 693 phydev->advertising = phydev->supported; 694 695 fm_eth->phydev = phydev; 696 697 phy_config(phydev); 698 #endif 699 700 return 0; 701 } 702 703 int fm_eth_initialize(struct ccsr_fman *reg, struct 
fm_eth_info *info) 704 { 705 struct eth_device *dev; 706 struct fm_eth *fm_eth; 707 int i, num = info->num; 708 int ret; 709 710 /* alloc eth device */ 711 dev = (struct eth_device *)malloc(sizeof(struct eth_device)); 712 if (!dev) 713 return -ENOMEM; 714 memset(dev, 0, sizeof(struct eth_device)); 715 716 /* alloc the FMan ethernet private struct */ 717 fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth)); 718 if (!fm_eth) 719 return -ENOMEM; 720 memset(fm_eth, 0, sizeof(struct fm_eth)); 721 722 /* save off some things we need from the info struct */ 723 fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */ 724 fm_eth->num = num; 725 fm_eth->type = info->type; 726 727 fm_eth->rx_port = (void *)®->port[info->rx_port_id - 1].fm_bmi; 728 fm_eth->tx_port = (void *)®->port[info->tx_port_id - 1].fm_bmi; 729 730 /* set the ethernet max receive length */ 731 fm_eth->max_rx_len = MAX_RXBUF_LEN; 732 733 /* init global mac structure */ 734 ret = fm_eth_init_mac(fm_eth, reg); 735 if (ret) 736 return ret; 737 738 /* keep same as the manual, we call FMAN1, FMAN2, DTSEC1, DTSEC2, etc */ 739 if (fm_eth->type == FM_ETH_1G_E) 740 sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1); 741 else 742 sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1); 743 744 devlist[num_controllers++] = dev; 745 dev->iobase = 0; 746 dev->priv = (void *)fm_eth; 747 dev->init = fm_eth_open; 748 dev->halt = fm_eth_halt; 749 dev->send = fm_eth_send; 750 dev->recv = fm_eth_recv; 751 fm_eth->dev = dev; 752 fm_eth->bus = info->bus; 753 fm_eth->phyaddr = info->phy_addr; 754 fm_eth->enet_if = info->enet_if; 755 756 /* startup the FM im */ 757 ret = fm_eth_startup(fm_eth); 758 if (ret) 759 return ret; 760 761 init_phy(dev); 762 763 /* clear the ethernet address */ 764 for (i = 0; i < 6; i++) 765 dev->enetaddr[i] = 0; 766 eth_register(dev); 767 768 return 0; 769 } 770