/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <fdtdec.h>
#include <libfdt.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm-generic/errno.h>

#if !defined(CONFIG_PHYLIB)
# error XILINX_GEM_ETHERNET requires PHYLIB
#endif

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit (set by HW when a frame lands) */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by Hw */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x000000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x000000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x000000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x000020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x0000c0000 /* Div pclk by 48, max 120MHz */

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT	(ZYNQ_GEM_DBUS_WIDTH | \
				 ZYNQ_GEM_NWCFG_FDEN | \
				 ZYNQ_GEM_NWCFG_FSREM | \
				 ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#define ZYNQ_GEM_DMACR_INIT	(ZYNQ_GEM_DMACR_BLENGTH | \
				 ZYNQ_GEM_DMACR_RXSIZE | \
				 ZYNQ_GEM_DMACR_TXSIZE | \
				 ZYNQ_GEM_DMACR_RXBUF)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - Phy Maintaince reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octects transmitted Low reg */
	u32 reserved7[164];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
};

#define RX_BUF 32
/* Page table entries are set to 1MB, or
multiples of 1MB 151 * (not < 1MB). driver uses less bd's so use 1MB bdspace. 152 */ 153 #define BD_SPACE 0x100000 154 /* BD separation space */ 155 #define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd)) 156 157 /* Setup the first free TX descriptor */ 158 #define TX_FREE_DESC 2 159 160 /* Initialized, rxbd_current, rx_first_buf must be 0 after init */ 161 struct zynq_gem_priv { 162 struct emac_bd *tx_bd; 163 struct emac_bd *rx_bd; 164 char *rxbuffers; 165 u32 rxbd_current; 166 u32 rx_first_buf; 167 int phyaddr; 168 u32 emio; 169 int init; 170 struct zynq_gem_regs *iobase; 171 phy_interface_t interface; 172 struct phy_device *phydev; 173 struct mii_dev *bus; 174 }; 175 176 static inline int mdio_wait(struct zynq_gem_regs *regs) 177 { 178 u32 timeout = 20000; 179 180 /* Wait till MDIO interface is ready to accept a new transaction. */ 181 while (--timeout) { 182 if (readl(®s->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK) 183 break; 184 WATCHDOG_RESET(); 185 } 186 187 if (!timeout) { 188 printf("%s: Timeout\n", __func__); 189 return 1; 190 } 191 192 return 0; 193 } 194 195 static u32 phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum, 196 u32 op, u16 *data) 197 { 198 u32 mgtcr; 199 struct zynq_gem_regs *regs = priv->iobase; 200 201 if (mdio_wait(regs)) 202 return 1; 203 204 /* Construct mgtcr mask for the operation */ 205 mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op | 206 (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) | 207 (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data; 208 209 /* Write mgtcr and wait for completion */ 210 writel(mgtcr, ®s->phymntnc); 211 212 if (mdio_wait(regs)) 213 return 1; 214 215 if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK) 216 *data = readl(®s->phymntnc); 217 218 return 0; 219 } 220 221 static u32 phyread(struct zynq_gem_priv *priv, u32 phy_addr, 222 u32 regnum, u16 *val) 223 { 224 u32 ret; 225 226 ret = phy_setup_op(priv, phy_addr, regnum, 227 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val); 228 229 if (!ret) 230 debug("%s: phy_addr %d, regnum 0x%x, 
val 0x%x\n", __func__, 231 phy_addr, regnum, *val); 232 233 return ret; 234 } 235 236 static u32 phywrite(struct zynq_gem_priv *priv, u32 phy_addr, 237 u32 regnum, u16 data) 238 { 239 debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr, 240 regnum, data); 241 242 return phy_setup_op(priv, phy_addr, regnum, 243 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data); 244 } 245 246 static int phy_detection(struct eth_device *dev) 247 { 248 int i; 249 u16 phyreg; 250 struct zynq_gem_priv *priv = dev->priv; 251 252 if (priv->phyaddr != -1) { 253 phyread(priv, priv->phyaddr, PHY_DETECT_REG, &phyreg); 254 if ((phyreg != 0xFFFF) && 255 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) { 256 /* Found a valid PHY address */ 257 debug("Default phy address %d is valid\n", 258 priv->phyaddr); 259 return 0; 260 } else { 261 debug("PHY address is not setup correctly %d\n", 262 priv->phyaddr); 263 priv->phyaddr = -1; 264 } 265 } 266 267 debug("detecting phy address\n"); 268 if (priv->phyaddr == -1) { 269 /* detect the PHY address */ 270 for (i = 31; i >= 0; i--) { 271 phyread(priv, i, PHY_DETECT_REG, &phyreg); 272 if ((phyreg != 0xFFFF) && 273 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) { 274 /* Found a valid PHY address */ 275 priv->phyaddr = i; 276 debug("Found valid phy address, %d\n", i); 277 return 0; 278 } 279 } 280 } 281 printf("PHY is not detected\n"); 282 return -1; 283 } 284 285 static int zynq_gem_setup_mac(struct eth_device *dev) 286 { 287 u32 i, macaddrlow, macaddrhigh; 288 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 289 290 /* Set the MAC bits [31:0] in BOT */ 291 macaddrlow = dev->enetaddr[0]; 292 macaddrlow |= dev->enetaddr[1] << 8; 293 macaddrlow |= dev->enetaddr[2] << 16; 294 macaddrlow |= dev->enetaddr[3] << 24; 295 296 /* Set MAC bits [47:32] in TOP */ 297 macaddrhigh = dev->enetaddr[4]; 298 macaddrhigh |= dev->enetaddr[5] << 8; 299 300 for (i = 0; i < 4; i++) { 301 writel(0, ®s->laddr[i][LADDR_LOW]); 302 writel(0, 
®s->laddr[i][LADDR_HIGH]); 303 /* Do not use MATCHx register */ 304 writel(0, ®s->match[i]); 305 } 306 307 writel(macaddrlow, ®s->laddr[0][LADDR_LOW]); 308 writel(macaddrhigh, ®s->laddr[0][LADDR_HIGH]); 309 310 return 0; 311 } 312 313 static int zynq_phy_init(struct eth_device *dev) 314 { 315 int ret; 316 struct zynq_gem_priv *priv = dev->priv; 317 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 318 const u32 supported = SUPPORTED_10baseT_Half | 319 SUPPORTED_10baseT_Full | 320 SUPPORTED_100baseT_Half | 321 SUPPORTED_100baseT_Full | 322 SUPPORTED_1000baseT_Half | 323 SUPPORTED_1000baseT_Full; 324 325 /* Enable only MDIO bus */ 326 writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, ®s->nwctrl); 327 328 ret = phy_detection(dev); 329 if (ret) { 330 printf("GEM PHY init failed\n"); 331 return ret; 332 } 333 334 priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev, 335 priv->interface); 336 if (!priv->phydev) 337 return -ENODEV; 338 339 priv->phydev->supported = supported | ADVERTISED_Pause | 340 ADVERTISED_Asym_Pause; 341 priv->phydev->advertising = priv->phydev->supported; 342 phy_config(priv->phydev); 343 344 return 0; 345 } 346 347 static int zynq_gem_init(struct eth_device *dev, bd_t *bis) 348 { 349 u32 i; 350 unsigned long clk_rate = 0; 351 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 352 struct zynq_gem_priv *priv = dev->priv; 353 struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC]; 354 struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2]; 355 356 if (!priv->init) { 357 /* Disable all interrupts */ 358 writel(0xFFFFFFFF, ®s->idr); 359 360 /* Disable the receiver & transmitter */ 361 writel(0, ®s->nwctrl); 362 writel(0, ®s->txsr); 363 writel(0, ®s->rxsr); 364 writel(0, ®s->phymntnc); 365 366 /* Clear the Hash registers for the mac address 367 * pointed by AddressPtr 368 */ 369 writel(0x0, ®s->hashl); 370 /* Write bits [63:32] in TOP */ 371 writel(0x0, ®s->hashh); 372 373 /* Clear all counters */ 374 for (i = 0; i < STAT_SIZE; 
i++) 375 readl(®s->stat[i]); 376 377 /* Setup RxBD space */ 378 memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd)); 379 380 for (i = 0; i < RX_BUF; i++) { 381 priv->rx_bd[i].status = 0xF0000000; 382 priv->rx_bd[i].addr = 383 ((ulong)(priv->rxbuffers) + 384 (i * PKTSIZE_ALIGN)); 385 } 386 /* WRAP bit to last BD */ 387 priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK; 388 /* Write RxBDs to IP */ 389 writel((ulong)priv->rx_bd, ®s->rxqbase); 390 391 /* Setup for DMA Configuration register */ 392 writel(ZYNQ_GEM_DMACR_INIT, ®s->dmacr); 393 394 /* Setup for Network Control register, MDIO, Rx and Tx enable */ 395 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK); 396 397 /* Disable the second priority queue */ 398 dummy_tx_bd->addr = 0; 399 dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK | 400 ZYNQ_GEM_TXBUF_LAST_MASK| 401 ZYNQ_GEM_TXBUF_USED_MASK; 402 403 dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK | 404 ZYNQ_GEM_RXBUF_NEW_MASK; 405 dummy_rx_bd->status = 0; 406 flush_dcache_range((ulong)&dummy_tx_bd, (ulong)&dummy_tx_bd + 407 sizeof(dummy_tx_bd)); 408 flush_dcache_range((ulong)&dummy_rx_bd, (ulong)&dummy_rx_bd + 409 sizeof(dummy_rx_bd)); 410 411 writel((ulong)dummy_tx_bd, ®s->transmit_q1_ptr); 412 writel((ulong)dummy_rx_bd, ®s->receive_q1_ptr); 413 414 priv->init++; 415 } 416 417 phy_startup(priv->phydev); 418 419 if (!priv->phydev->link) { 420 printf("%s: No link.\n", priv->phydev->dev->name); 421 return -1; 422 } 423 424 switch (priv->phydev->speed) { 425 case SPEED_1000: 426 writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000, 427 ®s->nwcfg); 428 clk_rate = ZYNQ_GEM_FREQUENCY_1000; 429 break; 430 case SPEED_100: 431 writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100, 432 ®s->nwcfg); 433 clk_rate = ZYNQ_GEM_FREQUENCY_100; 434 break; 435 case SPEED_10: 436 clk_rate = ZYNQ_GEM_FREQUENCY_10; 437 break; 438 } 439 440 /* Change the rclk and clk only not using EMIO interface */ 441 if (!priv->emio) 442 zynq_slcr_gem_clk_setup(dev->iobase != 443 
ZYNQ_GEM_BASEADDR0, clk_rate); 444 445 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK | 446 ZYNQ_GEM_NWCTRL_TXEN_MASK); 447 448 return 0; 449 } 450 451 static int wait_for_bit(const char *func, u32 *reg, const u32 mask, 452 bool set, unsigned int timeout) 453 { 454 u32 val; 455 unsigned long start = get_timer(0); 456 457 while (1) { 458 val = readl(reg); 459 460 if (!set) 461 val = ~val; 462 463 if ((val & mask) == mask) 464 return 0; 465 466 if (get_timer(start) > timeout) 467 break; 468 469 udelay(1); 470 } 471 472 debug("%s: Timeout (reg=%p mask=%08x wait_set=%i)\n", 473 func, reg, mask, set); 474 475 return -ETIMEDOUT; 476 } 477 478 static int zynq_gem_send(struct eth_device *dev, void *ptr, int len) 479 { 480 u32 addr, size; 481 struct zynq_gem_priv *priv = dev->priv; 482 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 483 struct emac_bd *current_bd = &priv->tx_bd[1]; 484 485 /* Setup Tx BD */ 486 memset(priv->tx_bd, 0, sizeof(struct emac_bd)); 487 488 priv->tx_bd->addr = (ulong)ptr; 489 priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) | 490 ZYNQ_GEM_TXBUF_LAST_MASK; 491 /* Dummy descriptor to mark it as the last in descriptor chain */ 492 current_bd->addr = 0x0; 493 current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK | 494 ZYNQ_GEM_TXBUF_LAST_MASK| 495 ZYNQ_GEM_TXBUF_USED_MASK; 496 497 /* setup BD */ 498 writel((ulong)priv->tx_bd, ®s->txqbase); 499 500 addr = (ulong) ptr; 501 addr &= ~(ARCH_DMA_MINALIGN - 1); 502 size = roundup(len, ARCH_DMA_MINALIGN); 503 flush_dcache_range(addr, addr + size); 504 505 addr = (ulong)priv->rxbuffers; 506 addr &= ~(ARCH_DMA_MINALIGN - 1); 507 size = roundup((RX_BUF * PKTSIZE_ALIGN), ARCH_DMA_MINALIGN); 508 flush_dcache_range(addr, addr + size); 509 barrier(); 510 511 /* Start transmit */ 512 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK); 513 514 /* Read TX BD status */ 515 if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED) 516 printf("TX buffers exhausted in mid frame\n"); 517 518 return 
wait_for_bit(__func__, ®s->txsr, ZYNQ_GEM_TSR_DONE, 519 true, 20000); 520 } 521 522 /* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */ 523 static int zynq_gem_recv(struct eth_device *dev) 524 { 525 int frame_len; 526 struct zynq_gem_priv *priv = dev->priv; 527 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current]; 528 struct emac_bd *first_bd; 529 530 if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK)) 531 return 0; 532 533 if (!(current_bd->status & 534 (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) { 535 printf("GEM: SOF or EOF not set for last buffer received!\n"); 536 return 0; 537 } 538 539 frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK; 540 if (frame_len) { 541 u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK; 542 addr &= ~(ARCH_DMA_MINALIGN - 1); 543 544 net_process_received_packet((u8 *)(ulong)addr, frame_len); 545 546 if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) 547 priv->rx_first_buf = priv->rxbd_current; 548 else { 549 current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK; 550 current_bd->status = 0xF0000000; /* FIXME */ 551 } 552 553 if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) { 554 first_bd = &priv->rx_bd[priv->rx_first_buf]; 555 first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK; 556 first_bd->status = 0xF0000000; 557 } 558 559 if ((++priv->rxbd_current) >= RX_BUF) 560 priv->rxbd_current = 0; 561 } 562 563 return frame_len; 564 } 565 566 static void zynq_gem_halt(struct eth_device *dev) 567 { 568 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 569 570 clrsetbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK | 571 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0); 572 } 573 574 static int zynq_gem_miiphyread(const char *devname, uchar addr, 575 uchar reg, ushort *val) 576 { 577 struct eth_device *dev = eth_get_dev(); 578 struct zynq_gem_priv *priv = dev->priv; 579 int ret; 580 581 ret = phyread(priv, addr, reg, val); 582 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val); 583 return ret; 584 } 585 586 
static int zynq_gem_miiphy_write(const char *devname, uchar addr, 587 uchar reg, ushort val) 588 { 589 struct eth_device *dev = eth_get_dev(); 590 struct zynq_gem_priv *priv = dev->priv; 591 592 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val); 593 return phywrite(priv, addr, reg, val); 594 } 595 596 int zynq_gem_initialize(bd_t *bis, phys_addr_t base_addr, 597 int phy_addr, u32 emio) 598 { 599 int ret; 600 struct eth_device *dev; 601 struct zynq_gem_priv *priv; 602 void *bd_space; 603 604 dev = calloc(1, sizeof(*dev)); 605 if (dev == NULL) 606 return -1; 607 608 dev->priv = calloc(1, sizeof(struct zynq_gem_priv)); 609 if (dev->priv == NULL) { 610 free(dev); 611 return -1; 612 } 613 priv = dev->priv; 614 615 /* Align rxbuffers to ARCH_DMA_MINALIGN */ 616 priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN); 617 memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN); 618 619 /* Align bd_space to MMU_SECTION_SHIFT */ 620 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 621 mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, 622 BD_SPACE, DCACHE_OFF); 623 624 /* Initialize the bd spaces for tx and rx bd's */ 625 priv->tx_bd = (struct emac_bd *)bd_space; 626 priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE); 627 628 priv->phyaddr = phy_addr; 629 priv->emio = emio; 630 631 #ifndef CONFIG_ZYNQ_GEM_INTERFACE 632 priv->interface = PHY_INTERFACE_MODE_MII; 633 #else 634 priv->interface = CONFIG_ZYNQ_GEM_INTERFACE; 635 #endif 636 637 sprintf(dev->name, "Gem.%lx", base_addr); 638 639 dev->iobase = base_addr; 640 priv->iobase = (struct zynq_gem_regs *)base_addr; 641 642 dev->init = zynq_gem_init; 643 dev->halt = zynq_gem_halt; 644 dev->send = zynq_gem_send; 645 dev->recv = zynq_gem_recv; 646 dev->write_hwaddr = zynq_gem_setup_mac; 647 648 eth_register(dev); 649 650 miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write); 651 priv->bus = miiphy_get_dev_by_name(dev->name); 652 653 ret = zynq_phy_init(dev); 654 if (ret) 
655 return ret; 656 657 return 1; 658 } 659 660 #if CONFIG_IS_ENABLED(OF_CONTROL) 661 int zynq_gem_of_init(const void *blob) 662 { 663 int offset = 0; 664 u32 ret = 0; 665 u32 reg, phy_reg; 666 667 debug("ZYNQ GEM: Initialization\n"); 668 669 do { 670 offset = fdt_node_offset_by_compatible(blob, offset, 671 "xlnx,ps7-ethernet-1.00.a"); 672 if (offset != -1) { 673 reg = fdtdec_get_addr(blob, offset, "reg"); 674 if (reg != FDT_ADDR_T_NONE) { 675 offset = fdtdec_lookup_phandle(blob, offset, 676 "phy-handle"); 677 if (offset != -1) 678 phy_reg = fdtdec_get_addr(blob, offset, 679 "reg"); 680 else 681 phy_reg = 0; 682 683 debug("ZYNQ GEM: addr %x, phyaddr %x\n", 684 reg, phy_reg); 685 686 ret |= zynq_gem_initialize(NULL, reg, 687 phy_reg, 0); 688 689 } else { 690 debug("ZYNQ GEM: Can't get base address\n"); 691 return -1; 692 } 693 } 694 } while (offset != -1); 695 696 return ret; 697 } 698 #endif 699