/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include "dwc_eth_qos.h"

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
/*
 * MAC sub-block register map. The unused_* array members are padding that
 * keeps each named register at the documented offset (noted per line).
 */
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

/* MAC_CONFIGURATION register bit definitions */
#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

/* MAC_MDIO_Address register fields (GMII/MDIO station management) */
#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
/* MTL (MAC Translation Layer) sub-block register map; queue 0 only. */
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
/* DMA sub-block register map; channel 0 only. */
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

/* One hardware DMA descriptor (TX and RX share this layout). */
struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available.
If 286 * descriptors are mapped uncached there's no need to manually flush them 287 * or invalidate them. 288 * 289 * Note that this only applies to descriptors. The packet data buffers do 290 * not have the same constraints since they are 1536 bytes large, so they 291 * are unlikely to share cache-lines. 292 */ 293 static void *eqos_alloc_descs(unsigned int num) 294 { 295 #ifdef CONFIG_SYS_NONCACHED_MEMORY 296 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 297 EQOS_DESCRIPTOR_ALIGN); 298 #else 299 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 300 #endif 301 } 302 303 static void eqos_free_descs(void *descs) 304 { 305 #ifdef CONFIG_SYS_NONCACHED_MEMORY 306 /* FIXME: noncached_alloc() has no opposite */ 307 #else 308 free(descs); 309 #endif 310 } 311 312 static void eqos_inval_desc_tegra186(void *desc) 313 { 314 #ifndef CONFIG_SYS_NONCACHED_MEMORY 315 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 316 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 317 ARCH_DMA_MINALIGN); 318 319 invalidate_dcache_range(start, end); 320 #endif 321 } 322 323 static void eqos_inval_desc_generic(void *desc) 324 { 325 #ifndef CONFIG_SYS_NONCACHED_MEMORY 326 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 327 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 328 ARCH_DMA_MINALIGN); 329 330 invalidate_dcache_range(start, end); 331 #endif 332 } 333 334 static void eqos_flush_desc_tegra186(void *desc) 335 { 336 #ifndef CONFIG_SYS_NONCACHED_MEMORY 337 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 338 #endif 339 } 340 341 static void eqos_flush_desc_generic(void *desc) 342 { 343 #ifndef CONFIG_SYS_NONCACHED_MEMORY 344 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 345 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 346 ARCH_DMA_MINALIGN); 347 348 flush_dcache_range(start, end); 349 #endif 350 } 351 352 static void 
eqos_inval_buffer_tegra186(void *buf, size_t size) 353 { 354 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 355 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 356 357 invalidate_dcache_range(start, end); 358 } 359 360 static void eqos_inval_buffer_generic(void *buf, size_t size) 361 { 362 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 363 unsigned long end = roundup((unsigned long)buf + size, 364 ARCH_DMA_MINALIGN); 365 366 invalidate_dcache_range(start, end); 367 } 368 369 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 370 { 371 flush_cache((unsigned long)buf, size); 372 } 373 374 static void eqos_flush_buffer_generic(void *buf, size_t size) 375 { 376 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 377 unsigned long end = roundup((unsigned long)buf + size, 378 ARCH_DMA_MINALIGN); 379 380 flush_dcache_range(start, end); 381 } 382 383 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 384 { 385 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 386 EQOS_MAC_MDIO_ADDRESS_GB, false, 387 1000000, true); 388 } 389 390 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 391 int mdio_reg) 392 { 393 struct eqos_priv *eqos = bus->priv; 394 u32 val; 395 int ret; 396 397 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 398 mdio_reg); 399 400 ret = eqos_mdio_wait_idle(eqos); 401 if (ret) { 402 pr_err("MDIO not idle at entry"); 403 return ret; 404 } 405 406 val = readl(&eqos->mac_regs->mdio_address); 407 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 408 EQOS_MAC_MDIO_ADDRESS_C45E; 409 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 410 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 411 (eqos->config->config_mac_mdio << 412 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 413 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 414 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 415 EQOS_MAC_MDIO_ADDRESS_GB; 416 writel(val, &eqos->mac_regs->mdio_address); 417 418 
udelay(eqos->config->mdio_wait); 419 420 ret = eqos_mdio_wait_idle(eqos); 421 if (ret) { 422 pr_err("MDIO read didn't complete"); 423 return ret; 424 } 425 426 val = readl(&eqos->mac_regs->mdio_data); 427 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 428 429 debug("%s: val=%x\n", __func__, val); 430 431 return val; 432 } 433 434 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 435 int mdio_reg, u16 mdio_val) 436 { 437 struct eqos_priv *eqos = bus->priv; 438 u32 val; 439 int ret; 440 441 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 442 mdio_addr, mdio_reg, mdio_val); 443 444 ret = eqos_mdio_wait_idle(eqos); 445 if (ret) { 446 pr_err("MDIO not idle at entry"); 447 return ret; 448 } 449 450 writel(mdio_val, &eqos->mac_regs->mdio_data); 451 452 val = readl(&eqos->mac_regs->mdio_address); 453 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 454 EQOS_MAC_MDIO_ADDRESS_C45E; 455 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 456 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 457 (eqos->config->config_mac_mdio << 458 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 459 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 460 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 461 EQOS_MAC_MDIO_ADDRESS_GB; 462 writel(val, &eqos->mac_regs->mdio_address); 463 464 udelay(eqos->config->mdio_wait); 465 466 ret = eqos_mdio_wait_idle(eqos); 467 if (ret) { 468 pr_err("MDIO read didn't complete"); 469 return ret; 470 } 471 472 return 0; 473 } 474 475 static int eqos_start_clks_tegra186(struct udevice *dev) 476 { 477 #ifdef CONFIG_CLK 478 struct eqos_priv *eqos = dev_get_priv(dev); 479 int ret; 480 481 debug("%s(dev=%p):\n", __func__, dev); 482 483 ret = clk_enable(&eqos->clk_slave_bus); 484 if (ret < 0) { 485 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 486 goto err; 487 } 488 489 ret = clk_enable(&eqos->clk_master_bus); 490 if (ret < 0) { 491 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 492 goto err_disable_clk_slave_bus; 493 } 494 495 ret = clk_enable(&eqos->clk_rx); 496 if 
(ret < 0) { 497 pr_err("clk_enable(clk_rx) failed: %d", ret); 498 goto err_disable_clk_master_bus; 499 } 500 501 ret = clk_enable(&eqos->clk_ptp_ref); 502 if (ret < 0) { 503 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 504 goto err_disable_clk_rx; 505 } 506 507 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 508 if (ret < 0) { 509 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 510 goto err_disable_clk_ptp_ref; 511 } 512 513 ret = clk_enable(&eqos->clk_tx); 514 if (ret < 0) { 515 pr_err("clk_enable(clk_tx) failed: %d", ret); 516 goto err_disable_clk_ptp_ref; 517 } 518 #endif 519 520 debug("%s: OK\n", __func__); 521 return 0; 522 523 #ifdef CONFIG_CLK 524 err_disable_clk_ptp_ref: 525 clk_disable(&eqos->clk_ptp_ref); 526 err_disable_clk_rx: 527 clk_disable(&eqos->clk_rx); 528 err_disable_clk_master_bus: 529 clk_disable(&eqos->clk_master_bus); 530 err_disable_clk_slave_bus: 531 clk_disable(&eqos->clk_slave_bus); 532 err: 533 debug("%s: FAILED: %d\n", __func__, ret); 534 return ret; 535 #endif 536 } 537 538 static int eqos_start_clks_stm32(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_master_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 549 goto err; 550 } 551 552 if (clk_valid(&eqos->clk_rx)) { 553 ret = clk_enable(&eqos->clk_rx); 554 if (ret < 0) { 555 pr_err("clk_enable(clk_rx) failed: %d", ret); 556 goto err_disable_clk_master_bus; 557 } 558 } 559 560 if (clk_valid(&eqos->clk_tx)) { 561 ret = clk_enable(&eqos->clk_tx); 562 if (ret < 0) { 563 pr_err("clk_enable(clk_tx) failed: %d", ret); 564 goto err_disable_clk_rx; 565 } 566 } 567 568 if (clk_valid(&eqos->clk_ck)) { 569 ret = clk_enable(&eqos->clk_ck); 570 if (ret < 0) { 571 pr_err("clk_enable(clk_ck) failed: %d", ret); 572 goto err_disable_clk_tx; 573 } 574 } 575 #endif 576 577 debug("%s: OK\n", __func__); 578 
return 0; 579 580 #ifdef CONFIG_CLK 581 err_disable_clk_tx: 582 if (clk_valid(&eqos->clk_tx)) 583 clk_disable(&eqos->clk_tx); 584 err_disable_clk_rx: 585 if (clk_valid(&eqos->clk_rx)) 586 clk_disable(&eqos->clk_rx); 587 err_disable_clk_master_bus: 588 clk_disable(&eqos->clk_master_bus); 589 err: 590 debug("%s: FAILED: %d\n", __func__, ret); 591 return ret; 592 #endif 593 } 594 595 static int eqos_start_clks_imx(struct udevice *dev) 596 { 597 return 0; 598 } 599 600 static void eqos_stop_clks_tegra186(struct udevice *dev) 601 { 602 #ifdef CONFIG_CLK 603 struct eqos_priv *eqos = dev_get_priv(dev); 604 605 debug("%s(dev=%p):\n", __func__, dev); 606 607 clk_disable(&eqos->clk_tx); 608 clk_disable(&eqos->clk_ptp_ref); 609 clk_disable(&eqos->clk_rx); 610 clk_disable(&eqos->clk_master_bus); 611 clk_disable(&eqos->clk_slave_bus); 612 #endif 613 614 debug("%s: OK\n", __func__); 615 } 616 617 static void eqos_stop_clks_stm32(struct udevice *dev) 618 { 619 #ifdef CONFIG_CLK 620 struct eqos_priv *eqos = dev_get_priv(dev); 621 622 debug("%s(dev=%p):\n", __func__, dev); 623 624 if (clk_valid(&eqos->clk_tx)) 625 clk_disable(&eqos->clk_tx); 626 if (clk_valid(&eqos->clk_rx)) 627 clk_disable(&eqos->clk_rx); 628 clk_disable(&eqos->clk_master_bus); 629 if (clk_valid(&eqos->clk_ck)) 630 clk_disable(&eqos->clk_ck); 631 #endif 632 633 debug("%s: OK\n", __func__); 634 } 635 636 static void eqos_stop_clks_imx(struct udevice *dev) 637 { 638 /* empty */ 639 } 640 641 static int eqos_start_resets_tegra186(struct udevice *dev) 642 { 643 struct eqos_priv *eqos = dev_get_priv(dev); 644 int ret; 645 646 debug("%s(dev=%p):\n", __func__, dev); 647 648 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 649 if (ret < 0) { 650 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 651 return ret; 652 } 653 654 udelay(2); 655 656 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 657 if (ret < 0) { 658 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 659 return ret; 660 } 661 
662 ret = reset_assert(&eqos->reset_ctl); 663 if (ret < 0) { 664 pr_err("reset_assert() failed: %d", ret); 665 return ret; 666 } 667 668 udelay(2); 669 670 ret = reset_deassert(&eqos->reset_ctl); 671 if (ret < 0) { 672 pr_err("reset_deassert() failed: %d", ret); 673 return ret; 674 } 675 676 debug("%s: OK\n", __func__); 677 return 0; 678 } 679 680 static int eqos_start_resets_stm32(struct udevice *dev) 681 { 682 struct eqos_priv *eqos = dev_get_priv(dev); 683 int ret; 684 685 debug("%s(dev=%p):\n", __func__, dev); 686 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 687 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 688 if (ret < 0) { 689 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 690 ret); 691 return ret; 692 } 693 694 udelay(eqos->reset_delays[0]); 695 696 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 697 if (ret < 0) { 698 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 699 ret); 700 return ret; 701 } 702 703 udelay(eqos->reset_delays[1]); 704 705 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 706 if (ret < 0) { 707 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 708 ret); 709 return ret; 710 } 711 712 udelay(eqos->reset_delays[2]); 713 } 714 debug("%s: OK\n", __func__); 715 716 return 0; 717 } 718 719 static int eqos_start_resets_imx(struct udevice *dev) 720 { 721 return 0; 722 } 723 724 static int eqos_stop_resets_tegra186(struct udevice *dev) 725 { 726 struct eqos_priv *eqos = dev_get_priv(dev); 727 728 reset_assert(&eqos->reset_ctl); 729 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 730 731 return 0; 732 } 733 734 static int eqos_stop_resets_stm32(struct udevice *dev) 735 { 736 struct eqos_priv *eqos = dev_get_priv(dev); 737 int ret; 738 739 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 740 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 741 if (ret < 0) { 742 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 743 ret); 744 return ret; 745 } 746 } 747 748 return 0; 749 } 750 751 static int 
eqos_stop_resets_imx(struct udevice *dev) 752 { 753 return 0; 754 } 755 756 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 757 { 758 struct eqos_priv *eqos = dev_get_priv(dev); 759 int ret; 760 761 debug("%s(dev=%p):\n", __func__, dev); 762 763 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 764 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 765 766 udelay(1); 767 768 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 769 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 770 771 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 772 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 773 if (ret) { 774 pr_err("calibrate didn't start"); 775 goto failed; 776 } 777 778 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 779 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 780 if (ret) { 781 pr_err("calibrate didn't finish"); 782 goto failed; 783 } 784 785 ret = 0; 786 787 failed: 788 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 789 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 790 791 debug("%s: returns %d\n", __func__, ret); 792 793 return ret; 794 } 795 796 static int eqos_disable_calibration_tegra186(struct udevice *dev) 797 { 798 struct eqos_priv *eqos = dev_get_priv(dev); 799 800 debug("%s(dev=%p):\n", __func__, dev); 801 802 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 803 EQOS_AUTO_CAL_CONFIG_ENABLE); 804 805 return 0; 806 } 807 808 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 809 { 810 #ifdef CONFIG_CLK 811 struct eqos_priv *eqos = dev_get_priv(dev); 812 813 return clk_get_rate(&eqos->clk_slave_bus); 814 #else 815 return 0; 816 #endif 817 } 818 819 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 820 { 821 #ifdef CONFIG_CLK 822 struct eqos_priv *eqos = dev_get_priv(dev); 823 824 return clk_get_rate(&eqos->clk_master_bus); 825 #else 826 return 0; 827 #endif 828 } 829 830 __weak u32 imx_get_eqos_csr_clk(void) 831 { 832 return 100 * 1000000; 833 } 834 __weak int 
imx_eqos_txclk_set_rate(unsigned long rate) 835 { 836 return 0; 837 } 838 839 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 840 { 841 return imx_get_eqos_csr_clk(); 842 } 843 844 static int eqos_calibrate_pads_stm32(struct udevice *dev) 845 { 846 return 0; 847 } 848 849 static int eqos_calibrate_pads_imx(struct udevice *dev) 850 { 851 return 0; 852 } 853 854 static int eqos_disable_calibration_stm32(struct udevice *dev) 855 { 856 return 0; 857 } 858 859 static int eqos_disable_calibration_imx(struct udevice *dev) 860 { 861 return 0; 862 } 863 864 static int eqos_set_full_duplex(struct udevice *dev) 865 { 866 struct eqos_priv *eqos = dev_get_priv(dev); 867 868 debug("%s(dev=%p):\n", __func__, dev); 869 870 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 871 872 return 0; 873 } 874 875 static int eqos_set_half_duplex(struct udevice *dev) 876 { 877 struct eqos_priv *eqos = dev_get_priv(dev); 878 879 debug("%s(dev=%p):\n", __func__, dev); 880 881 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 882 883 /* WAR: Flush TX queue when switching to half-duplex */ 884 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 885 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 886 887 return 0; 888 } 889 890 static int eqos_set_gmii_speed(struct udevice *dev) 891 { 892 struct eqos_priv *eqos = dev_get_priv(dev); 893 894 debug("%s(dev=%p):\n", __func__, dev); 895 896 clrbits_le32(&eqos->mac_regs->configuration, 897 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 898 899 return 0; 900 } 901 902 static int eqos_set_mii_speed_100(struct udevice *dev) 903 { 904 struct eqos_priv *eqos = dev_get_priv(dev); 905 906 debug("%s(dev=%p):\n", __func__, dev); 907 908 setbits_le32(&eqos->mac_regs->configuration, 909 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 910 911 return 0; 912 } 913 914 static int eqos_set_mii_speed_10(struct udevice *dev) 915 { 916 struct eqos_priv *eqos = dev_get_priv(dev); 917 918 debug("%s(dev=%p):\n", 
__func__, dev); 919 920 clrsetbits_le32(&eqos->mac_regs->configuration, 921 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 922 923 return 0; 924 } 925 926 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 927 { 928 #ifdef CONFIG_CLK 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 ulong rate; 931 int ret; 932 933 debug("%s(dev=%p):\n", __func__, dev); 934 935 switch (eqos->phy->speed) { 936 case SPEED_1000: 937 rate = 125 * 1000 * 1000; 938 break; 939 case SPEED_100: 940 rate = 25 * 1000 * 1000; 941 break; 942 case SPEED_10: 943 rate = 2.5 * 1000 * 1000; 944 break; 945 default: 946 pr_err("invalid speed %d", eqos->phy->speed); 947 return -EINVAL; 948 } 949 950 ret = clk_set_rate(&eqos->clk_tx, rate); 951 if (ret < 0) { 952 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 953 return ret; 954 } 955 #endif 956 957 return 0; 958 } 959 960 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 961 { 962 return 0; 963 } 964 965 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 966 { 967 struct eqos_priv *eqos = dev_get_priv(dev); 968 ulong rate; 969 int ret; 970 971 debug("%s(dev=%p):\n", __func__, dev); 972 973 switch (eqos->phy->speed) { 974 case SPEED_1000: 975 rate = 125 * 1000 * 1000; 976 break; 977 case SPEED_100: 978 rate = 25 * 1000 * 1000; 979 break; 980 case SPEED_10: 981 rate = 2.5 * 1000 * 1000; 982 break; 983 default: 984 pr_err("invalid speed %d", eqos->phy->speed); 985 return -EINVAL; 986 } 987 988 ret = imx_eqos_txclk_set_rate(rate); 989 if (ret < 0) { 990 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 991 return ret; 992 } 993 994 return 0; 995 } 996 997 static int eqos_adjust_link(struct udevice *dev) 998 { 999 struct eqos_priv *eqos = dev_get_priv(dev); 1000 int ret; 1001 bool en_calibration; 1002 1003 debug("%s(dev=%p):\n", __func__, dev); 1004 1005 if (eqos->phy->duplex) 1006 ret = eqos_set_full_duplex(dev); 1007 else 1008 ret = eqos_set_half_duplex(dev); 1009 if (ret < 0) { 1010 
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	/* Program the MAC speed to match what the PHY auto-negotiated */
	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	/* Pad calibration is enabled only for the faster link speeds */
	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d",
			       ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d",
			       ret);
			return ret;
		}
	}
	/* Configuration-specific TX clock setup for the negotiated speed */
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

/**
 * eqos_write_hwaddr() - Program the MAC address into the EQoS MAC registers.
 * @dev: EQoS ethernet udevice.
 *
 * Writes plat->enetaddr into MAC_Address0_{High,Low}; address bytes 4..5 go
 * to the high register, bytes 0..3 to the low register. Returns 0 always
 * (silently skips the write when register access is not currently safe).
 */
int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
		(plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
		(plat->enetaddr[2] << 16) |
		(plat->enetaddr[1] << 8) |
		(plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

/**
 * eqos_read_rom_hwaddr() - Read the MAC address from HW fuses (i.MX8M only).
 * @dev: EQoS ethernet udevice.
 *
 * On i.MX8M the address is pulled from OTP fuses via imx_get_mac_from_fuse();
 * on other platforms pdata->enetaddr is left untouched. Returns 0 when the
 * resulting address is valid, non-zero otherwise.
 */
static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

/**
 * eqos_init() - Bring the EQoS block and its PHY to a linked state.
 * @dev: EQoS ethernet udevice.
 *
 * Starts clocks/resets, waits for the DMA soft-reset to complete, programs
 * the 1us tick counter, connects/configures the PHY (first call only), waits
 * for link, and adjusts the MAC to the negotiated link parameters.
 * Returns 0 on success or a negative error code; on failure all resources
 * started here are torn back down via the goto ladder below.
 */
int eqos_init(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	ulong rate;
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	/* eqos_start_clks is optional; some configurations have no SW clocks */
	if (eqos->config->ops->eqos_start_clks) {
		ret = eqos->config->ops->eqos_start_clks(dev);
		if (ret < 0) {
			pr_err("eqos_start_clks() failed: %d", ret);
			goto err;
		}
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	/* Clocks running and reset released: register access is now safe */
	eqos->reg_access_ok = true;

	/* Wait for the DMA soft-reset (SWR) bit to self-clear */
	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	/* 1US_TIC_COUNTER holds (CSR clock in MHz) - 1 */
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * if PHY was already connected and configured,
	 * don't need to reconnect/reconfigure again
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		/* Build-time override of the PHY address wins over DT */
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->ops->eqos_get_interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/*
	 * NOTE(review): on this path ret is still 0 from phy_startup(), so
	 * the function appears to return 0 despite the link failure — verify
	 * intended behavior (callers treat 0 as success).
	 */
	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	debug("%s: OK\n", __func__);
	return 0;

	/* Unwind in reverse order of acquisition */
err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	if (eqos->config->ops->eqos_stop_clks)
		eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

/**
 * eqos_enable() - Program MTL, MAC and DMA and start TX/RX.
 * @dev: EQoS ethernet udevice.
 *
 * Configures queue 0 (store-and-forward on both directions, FIFO sizing and
 * flow-control thresholds derived from HW_FEATURE1), enables MAC TX/RX,
 * initializes the descriptor rings and starts both DMA channels. Must only
 * be called after eqos_init() succeeded.
 */
void eqos_enable(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	int i;

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	/* Configure MTL */
	/*
	 * NOTE(review): the "- 0x100" is uint32_t pointer arithmetic, i.e.
	 * 0x400 bytes below txq0_quantum_weight — this does not target the
	 * quantum weight register (which is written with 0x10 just below).
	 * Confirm which register this write is actually meant for.
	 */
	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Contol space for min 2
		 * frames ie, (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Contol for space of
		 * min 1 frame (frame size 1500bytes) in receive fifo
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur inspite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/*
	 * NOTE(review): this second write overrides the per-config
	 * config_mac value programmed just above with a fixed 0x2 —
	 * confirm whether both writes are intentional.
	 */
	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* enable promise mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	/* Strip CRC/pad on RX; disable watchdog/jabber/jumbo features */
	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
		EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
		EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);
		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		/* Descriptor fields must be visible before handing to HW */
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						(i * EQOS_MAX_PACKET_SIZE),
						EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;
}

/**
 * eqos_start() - eth_ops.start: initialize the HW then enable TX/RX.
 * @dev: EQoS ethernet udevice.
 *
 * Return: 0 on success, negative error code from eqos_init() on failure.
 */
static int eqos_start(struct udevice *dev)
{
	int ret;

	ret = eqos_init(dev);
	if (ret)
		return ret;

	eqos_enable(dev);

	return 0;
}

/**
 * eqos_stop() - eth_ops.stop: drain queues and quiesce the HW.
 * @dev: EQoS ethernet udevice.
 *
 * Stops TX DMA, busy-waits (bounded) for the MTL TX queue to drain, disables
 * MAC TX/RX, waits for the RX queue to drain, stops RX DMA, shuts down the
 * PHY and finally asserts resets / gates clocks. No-op if not started.
 */
void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for TX all packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		/* Done when the read controller is idle and the queue empty */
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy) {
		phy_shutdown(eqos->phy);
	}
	eqos->config->ops->eqos_stop_resets(dev);
	if (eqos->config->ops->eqos_stop_clks)
		eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

/**
 * eqos_send() - eth_ops.send: transmit one packet synchronously.
 * @dev:    EQoS ethernet udevice.
 * @packet: frame to send (copied into the driver's bounce buffer).
 * @length: frame length in bytes.
 *
 * Fills the next TX descriptor, hands it to HW via the OWN bit and tail
 * pointer, then polls (up to ~1s) for completion.
 * Return: 0 on success, -ETIMEDOUT if HW never releases the descriptor.
 */
int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	/* Advancing the tail pointer kicks the DMA into processing */
	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

/**
 * eqos_recv() - eth_ops.recv: check for and return one received packet.
 * @dev:     EQoS ethernet udevice.
 * @flags:   receive flags (unused here).
 * @packetp: out: pointer into the RX DMA buffer for this descriptor.
 *
 * Return: packet length on success, -EAGAIN when no packet is pending.
 * The descriptor is not recycled here; that happens in eqos_free_pkt().
 */
int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	/* Low 15 bits of des3 carry the received frame length */
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

/**
 * eqos_free_pkt() - eth_ops.free_pkt: recycle an RX descriptor.
 * @dev:    EQoS ethernet udevice.
 * @packet: buffer previously returned by eqos_recv().
 * @length: length of that packet.
 *
 * Re-arms the descriptor (buffer address, OWN|BUF1V) and advances the tail
 * pointer and ring index.
 * Return: 0 on success, -EINVAL if @packet is not the expected buffer.
 */
int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	/* Packets must be freed in ring order */
	packet_expected = eqos->rx_dma_buf +
		(eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

	/* Clear the buffer pointer first so HW cannot use a stale one */
	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

/**
 * eqos_probe_resources_core() - Allocate configuration-independent resources.
 * @dev: EQoS ethernet udevice.
 *
 * Allocates the TX/RX descriptor rings and DMA bounce buffers. Paired with
 * eqos_remove_resources_core().
 * Return: 0 on success, -ENOMEM on allocation failure (with full unwind).
 */
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	/* TX descriptors first, RX descriptors directly after */
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	/*
	 * NOTE(review): invalidates EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX
	 * bytes while the buffer was allocated with EQOS_RX_BUFFER_SIZE —
	 * presumably those are equal; verify in dwc_eth_qos.h.
	 */
	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/**
 * eqos_remove_resources_core() - Free what eqos_probe_resources_core() got.
 * @dev: EQoS ethernet udevice.
 *
 * Return: always 0.
 */
static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/**
 * eqos_probe_resources_tegra186() - Acquire Tegra186 clocks/reset/GPIO.
 * @dev: EQoS ethernet udevice.
 *
 * Looks up the "eqos" reset, the PHY reset GPIO and the five named clocks
 * from DT. On any failure everything acquired so far is released.
 * Return: 0 on success, negative error code otherwise.
 */
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
		/* NOTE(review): unreachable — dead code after the goto */
		return ret;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* board-specific Ethernet Interface initializations. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

/**
 * eqos_probe_resources_stm32() - Acquire STM32 clocks and PHY reset GPIO.
 * @dev: EQoS ethernet udevice.
 *
 * Mandatory: "stmmaceth" bus clock. Optional (warn only): RX/TX clocks,
 * "eth-ck" PHY clock, and a PHY reset GPIO found either in the phy node
 * ("reset-gpios") or via the legacy "snps,reset-gpio" binding.
 * Return: 0 on success, negative error code for mandatory failures.
 */
static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->ops->eqos_get_interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		return ret;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret)
		pr_warn("clk_get_by_name(rx) failed: %d", ret);

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret)
		pr_warn("clk_get_by_name(tx) failed: %d", ret);

	/* Get ETH_CLK clocks (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No phy clock provided %d", ret);

	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(phy reset) not provided %d",
				ret);
		else
			eqos->reset_delays[1] = 2;

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	/* Fall back to the legacy snps,reset-gpio binding */
	if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		int reset_flags = GPIOD_IS_OUT;

		if (dev_read_bool(dev, "snps,reset-active-low"))
			reset_flags |= GPIOD_ACTIVE_LOW;

		ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
					   &eqos->phy_reset_gpio, reset_flags);
		if (ret == 0)
			ret = dev_read_u32_array(dev, "snps,reset-delays-us",
						 eqos->reset_delays, 3);
		else
			pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d",
				ret);
	}

	debug("%s: OK\n", __func__);
	return 0;
}

/* Read the PHY interface mode from the DT "phy-mode" property (STM32). */
static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

/*
 * Tegra186 uses a fixed interface mode.
 * NOTE(review): the file header describes Tegra186 as using an RGMII PHY,
 * but MII is returned here — confirm this value is intentional.
 */
static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

/**
 * eqos_probe_resources_imx() - Validate i.MX configuration.
 * @dev: EQoS ethernet udevice.
 *
 * i.MX acquires no clocks/GPIOs here; it only verifies that a PHY interface
 * mode is present in DT.
 * Return: 0 on success, -EINVAL if no valid phy-mode is set.
 */
static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->ops->eqos_get_interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

/* Read the PHY interface mode from the flat DT "phy-mode" property (i.MX). */
static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

/* Release everything eqos_probe_resources_tegra186() acquired. */
static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

/* Release everything eqos_probe_resources_stm32() acquired (all optional). */
static int eqos_remove_resources_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	if (clk_valid(&eqos->clk_tx))
		clk_free(&eqos->clk_tx);
	if (clk_valid(&eqos->clk_rx))
		clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

1965 static int eqos_remove_resources_imx(struct udevice *dev) 1966 { 1967 return 0; 1968 } 1969 1970 int eqos_probe(struct udevice *dev) 1971 { 1972 struct eqos_priv *eqos = dev_get_priv(dev); 1973 int ret; 1974 1975 debug("%s(dev=%p):\n", __func__, dev); 1976 1977 eqos->dev = dev; 1978 eqos->config = (void *)dev_get_driver_data(dev); 1979 1980 eqos->regs = dev_read_addr(dev); 1981 if (eqos->regs == FDT_ADDR_T_NONE) { 1982 pr_err("dev_read_addr() failed"); 1983 return -ENODEV; 1984 } 1985 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 1986 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 1987 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 1988 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 1989 1990 ret = eqos_probe_resources_core(dev); 1991 if (ret < 0) { 1992 pr_err("eqos_probe_resources_core() failed: %d", ret); 1993 return ret; 1994 } 1995 1996 ret = eqos->config->ops->eqos_probe_resources(dev); 1997 if (ret < 0) { 1998 pr_err("eqos_probe_resources() failed: %d", ret); 1999 goto err_remove_resources_core; 2000 } 2001 2002 #ifdef CONFIG_DM_ETH_PHY 2003 eqos->mii = eth_phy_get_mdio_bus(dev); 2004 #endif 2005 if (!eqos->mii) { 2006 eqos->mii = mdio_alloc(); 2007 if (!eqos->mii) { 2008 pr_err("mdio_alloc() failed"); 2009 ret = -ENOMEM; 2010 goto err_remove_resources_tegra; 2011 } 2012 eqos->mii->read = eqos_mdio_read; 2013 eqos->mii->write = eqos_mdio_write; 2014 eqos->mii->priv = eqos; 2015 strcpy(eqos->mii->name, dev->name); 2016 2017 ret = mdio_register(eqos->mii); 2018 if (ret < 0) { 2019 pr_err("mdio_register() failed: %d", ret); 2020 goto err_free_mdio; 2021 } 2022 } 2023 2024 #ifdef CONFIG_DM_ETH_PHY 2025 eth_phy_set_mdio_bus(dev, eqos->mii); 2026 #endif 2027 2028 debug("%s: OK\n", __func__); 2029 return 0; 2030 2031 err_free_mdio: 2032 mdio_free(eqos->mii); 2033 err_remove_resources_tegra: 2034 eqos->config->ops->eqos_remove_resources(dev); 2035 err_remove_resources_core: 2036 
eqos_remove_resources_core(dev); 2037 2038 debug("%s: returns %d\n", __func__, ret); 2039 return ret; 2040 } 2041 2042 static int eqos_remove(struct udevice *dev) 2043 { 2044 struct eqos_priv *eqos = dev_get_priv(dev); 2045 2046 debug("%s(dev=%p):\n", __func__, dev); 2047 2048 mdio_unregister(eqos->mii); 2049 mdio_free(eqos->mii); 2050 eqos->config->ops->eqos_remove_resources(dev); 2051 2052 eqos_probe_resources_core(dev); 2053 2054 debug("%s: OK\n", __func__); 2055 return 0; 2056 } 2057 2058 static const struct eth_ops eqos_ops = { 2059 .start = eqos_start, 2060 .stop = eqos_stop, 2061 .send = eqos_send, 2062 .recv = eqos_recv, 2063 .free_pkt = eqos_free_pkt, 2064 .write_hwaddr = eqos_write_hwaddr, 2065 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2066 }; 2067 2068 static struct eqos_ops eqos_tegra186_ops = { 2069 .eqos_inval_desc = eqos_inval_desc_tegra186, 2070 .eqos_flush_desc = eqos_flush_desc_tegra186, 2071 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2072 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2073 .eqos_probe_resources = eqos_probe_resources_tegra186, 2074 .eqos_remove_resources = eqos_remove_resources_tegra186, 2075 .eqos_stop_resets = eqos_stop_resets_tegra186, 2076 .eqos_start_resets = eqos_start_resets_tegra186, 2077 .eqos_stop_clks = eqos_stop_clks_tegra186, 2078 .eqos_start_clks = eqos_start_clks_tegra186, 2079 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2080 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2081 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2082 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2083 .eqos_get_interface = eqos_get_interface_tegra186 2084 }; 2085 2086 static const struct eqos_config eqos_tegra186_config = { 2087 .reg_access_always_ok = false, 2088 .mdio_wait = 10, 2089 .swr_wait = 10, 2090 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2091 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2092 .ops = &eqos_tegra186_ops 2093 }; 2094 2095 static struct eqos_ops 
eqos_stm32_ops = { 2096 .eqos_inval_desc = eqos_inval_desc_generic, 2097 .eqos_flush_desc = eqos_flush_desc_generic, 2098 .eqos_inval_buffer = eqos_inval_buffer_generic, 2099 .eqos_flush_buffer = eqos_flush_buffer_generic, 2100 .eqos_probe_resources = eqos_probe_resources_stm32, 2101 .eqos_remove_resources = eqos_remove_resources_stm32, 2102 .eqos_stop_resets = eqos_stop_resets_stm32, 2103 .eqos_start_resets = eqos_start_resets_stm32, 2104 .eqos_stop_clks = eqos_stop_clks_stm32, 2105 .eqos_start_clks = eqos_start_clks_stm32, 2106 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2107 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2108 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2109 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2110 .eqos_get_interface = eqos_get_interface_stm32 2111 }; 2112 2113 static const struct eqos_config eqos_stm32_config = { 2114 .reg_access_always_ok = false, 2115 .mdio_wait = 10000, 2116 .swr_wait = 50, 2117 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2118 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2119 .ops = &eqos_stm32_ops 2120 }; 2121 2122 static struct eqos_ops eqos_imx_ops = { 2123 .eqos_inval_desc = eqos_inval_desc_generic, 2124 .eqos_flush_desc = eqos_flush_desc_generic, 2125 .eqos_inval_buffer = eqos_inval_buffer_generic, 2126 .eqos_flush_buffer = eqos_flush_buffer_generic, 2127 .eqos_probe_resources = eqos_probe_resources_imx, 2128 .eqos_remove_resources = eqos_remove_resources_imx, 2129 .eqos_stop_resets = eqos_stop_resets_imx, 2130 .eqos_start_resets = eqos_start_resets_imx, 2131 .eqos_stop_clks = eqos_stop_clks_imx, 2132 .eqos_start_clks = eqos_start_clks_imx, 2133 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2134 .eqos_disable_calibration = eqos_disable_calibration_imx, 2135 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2136 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, 2137 .eqos_get_interface = eqos_get_interface_imx 2138 }; 2139 2140 struct eqos_config 
eqos_imx_config = { 2141 .reg_access_always_ok = false, 2142 .mdio_wait = 10000, 2143 .swr_wait = 50, 2144 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2145 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2146 .ops = &eqos_imx_ops 2147 }; 2148 2149 static const struct udevice_id eqos_ids[] = { 2150 { 2151 .compatible = "nvidia,tegra186-eqos", 2152 .data = (ulong)&eqos_tegra186_config 2153 }, 2154 { 2155 .compatible = "snps,dwmac-4.20a", 2156 .data = (ulong)&eqos_stm32_config 2157 }, 2158 { 2159 .compatible = "fsl,imx-eqos", 2160 .data = (ulong)&eqos_imx_config 2161 }, 2162 2163 { } 2164 }; 2165 2166 U_BOOT_DRIVER(eth_eqos) = { 2167 .name = "eth_eqos", 2168 .id = UCLASS_ETH, 2169 .of_match = of_match_ptr(eqos_ids), 2170 .probe = eqos_probe, 2171 .remove = eqos_remove, 2172 .ops = &eqos_ops, 2173 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2174 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2175 }; 2176