1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 #ifdef CONFIG_ARCH_IMX8M 45 #include <asm/arch/clock.h> 46 #include <asm/mach-imx/sys_proto.h> 47 #endif 48 #include "dwc_eth_qos.h" 49 50 /* Core registers */ 51 52 #define EQOS_MAC_REGS_BASE 0x000 53 struct eqos_mac_regs { 54 uint32_t configuration; /* 0x000 */ 55 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 56 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 57 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 58 uint32_t rx_flow_ctrl; /* 0x090 */ 59 uint32_t unused_094; /* 0x094 */ 60 uint32_t txq_prty_map0; /* 0x098 */ 61 uint32_t unused_09c; /* 0x09c */ 62 uint32_t rxq_ctrl0; /* 0x0a0 */ 63 uint32_t unused_0a4; /* 0x0a4 */ 64 uint32_t rxq_ctrl2; /* 0x0a8 */ 65 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 66 uint32_t us_tic_counter; /* 0x0dc */ 67 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 68 uint32_t hw_feature0; /* 0x11c */ 69 uint32_t hw_feature1; /* 0x120 */ 70 uint32_t hw_feature2; /* 0x124 */ 71 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 72 uint32_t mdio_address; /* 0x200 */ 73 uint32_t mdio_data; /* 0x204 */ 74 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 75 uint32_t address0_high; /* 0x300 */ 76 uint32_t address0_low; /* 0x304 */ 77 }; 78 79 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 80 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 81 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 82 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 83 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 84 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 85 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 86 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 87 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 88 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 89 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 90 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 91 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 94 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 95 96 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 97 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 99 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 100 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 102 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 103 104 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 105 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 106 107 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 108 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 109 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 110 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 111 112 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 113 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 114 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 115 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 116 117 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 118 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 119 120 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 121 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 122 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 123 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 124 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 125 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 126 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 127 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 128 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 129 130 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 131 132 #define EQOS_MTL_REGS_BASE 0xd00 133 struct eqos_mtl_regs { 134 uint32_t txq0_operation_mode; /* 0xd00 */ 135 uint32_t unused_d04; /* 0xd04 */ 136 uint32_t txq0_debug; /* 0xd08 */ 137 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 138 uint32_t txq0_quantum_weight; /* 0xd18 */ 139 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 140 uint32_t rxq0_operation_mode; /* 0xd30 */ 141 uint32_t unused_d34; /* 0xd34 */ 142 uint32_t rxq0_debug; /* 0xd38 */ 143 }; 144 145 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 146 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 147 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 148 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 152 153 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 154 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 155 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 156 157 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 158 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 159 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 160 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 167 168 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 169 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 170 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 171 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 172 173 #define EQOS_DMA_REGS_BASE 0x1000 174 struct eqos_dma_regs { 175 uint32_t mode; /* 0x1000 */ 176 uint32_t sysbus_mode; /* 0x1004 */ 177 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 178 uint32_t ch0_control; /* 0x1100 */ 179 uint32_t ch0_tx_control; /* 0x1104 */ 180 uint32_t ch0_rx_control; /* 0x1108 */ 181 uint32_t unused_110c; /* 0x110c */ 182 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 183 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 184 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 185 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 186 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 187 uint32_t unused_1124; /* 0x1124 */ 188 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 189 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 190 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 191 }; 192 193 #define EQOS_DMA_MODE_SWR BIT(0) 194 195 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 196 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 197 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 198 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 199 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 200 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 201 202 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 203 204 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 205 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 206 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 207 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 208 209 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 210 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 211 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 212 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 213 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 214 215 /* These registers are Tegra186-specific */ 216 #define EQOS_TEGRA186_REGS_BASE 0x8800 217 struct eqos_tegra186_regs { 218 uint32_t sdmemcomppadctrl; /* 0x8800 */ 219 uint32_t auto_cal_config; /* 0x8804 */ 220 uint32_t unused_8808; /* 0x8808 */ 221 uint32_t auto_cal_status; /* 0x880c */ 222 }; 223 224 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 225 226 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 227 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 228 229 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 230 231 /* Descriptors */ 232 233 #define EQOS_DESCRIPTOR_WORDS 4 234 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 235 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 236 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 237 #define EQOS_DESCRIPTORS_TX 4 238 #define EQOS_DESCRIPTORS_RX 4 239 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 240 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 241 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 242 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 243 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 244 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 245 246 /* 247 * Warn if the cache-line size is larger than the descriptor size. In such 248 * cases the driver will likely fail because the CPU needs to flush the cache 249 * when requeuing RX buffers, therefore descriptors written by the hardware 250 * may be discarded. Architectures with full IO coherence, such as x86, do not 251 * experience this issue, and hence are excluded from this condition. 252 * 253 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 254 * the driver to allocate descriptors from a pool of non-cached memory. 255 */ 256 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 257 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 258 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 259 #warning Cache line size is larger than descriptor size 260 #endif 261 #endif 262 263 struct eqos_desc { 264 u32 des0; 265 u32 des1; 266 u32 des2; 267 u32 des3; 268 }; 269 270 #define EQOS_DESC3_OWN BIT(31) 271 #define EQOS_DESC3_FD BIT(29) 272 #define EQOS_DESC3_LD BIT(28) 273 #define EQOS_DESC3_BUF1V BIT(24) 274 275 /* 276 * TX and RX descriptors are 16 bytes. This causes problems with the cache 277 * maintenance on CPUs where the cache-line size exceeds the size of these 278 * descriptors. What will happen is that when the driver receives a packet 279 * it will be immediately requeued for the hardware to reuse. The CPU will 280 * therefore need to flush the cache-line containing the descriptor, which 281 * will cause all other descriptors in the same cache-line to be flushed 282 * along with it. If one of those descriptors had been written to by the 283 * device those changes (and the associated packet) will be lost. 284 * 285 * To work around this, we make use of non-cached memory if available. If 286 * descriptors are mapped uncached there's no need to manually flush them 287 * or invalidate them. 288 * 289 * Note that this only applies to descriptors. The packet data buffers do 290 * not have the same constraints since they are 1536 bytes large, so they 291 * are unlikely to share cache-lines. 292 */ 293 static void *eqos_alloc_descs(unsigned int num) 294 { 295 #ifdef CONFIG_SYS_NONCACHED_MEMORY 296 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 297 EQOS_DESCRIPTOR_ALIGN); 298 #else 299 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 300 #endif 301 } 302 303 static void eqos_free_descs(void *descs) 304 { 305 #ifdef CONFIG_SYS_NONCACHED_MEMORY 306 /* FIXME: noncached_alloc() has no opposite */ 307 #else 308 free(descs); 309 #endif 310 } 311 312 static void eqos_inval_desc_tegra186(void *desc) 313 { 314 #ifndef CONFIG_SYS_NONCACHED_MEMORY 315 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 316 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 317 ARCH_DMA_MINALIGN); 318 319 invalidate_dcache_range(start, end); 320 #endif 321 } 322 323 static void eqos_inval_desc_generic(void *desc) 324 { 325 #ifndef CONFIG_SYS_NONCACHED_MEMORY 326 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 327 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 328 ARCH_DMA_MINALIGN); 329 330 invalidate_dcache_range(start, end); 331 #endif 332 } 333 334 static void eqos_flush_desc_tegra186(void *desc) 335 { 336 #ifndef CONFIG_SYS_NONCACHED_MEMORY 337 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 338 #endif 339 } 340 341 static void eqos_flush_desc_generic(void *desc) 342 { 343 #ifndef CONFIG_SYS_NONCACHED_MEMORY 344 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 345 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 346 ARCH_DMA_MINALIGN); 347 348 flush_dcache_range(start, end); 349 #endif 350 } 351 352 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 353 { 354 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 355 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 356 357 invalidate_dcache_range(start, end); 358 } 359 360 static void eqos_inval_buffer_generic(void *buf, size_t size) 361 { 362 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 363 unsigned long end = roundup((unsigned long)buf + size, 364 ARCH_DMA_MINALIGN); 365 366 invalidate_dcache_range(start, end); 367 } 368 369 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 370 { 371 flush_cache((unsigned long)buf, size); 372 } 373 374 static void eqos_flush_buffer_generic(void *buf, size_t size) 375 { 376 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 377 unsigned long end = roundup((unsigned long)buf + size, 378 ARCH_DMA_MINALIGN); 379 380 flush_dcache_range(start, end); 381 } 382 383 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 384 { 385 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 386 EQOS_MAC_MDIO_ADDRESS_GB, false, 387 1000000, true); 388 } 389 390 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 391 int mdio_reg) 392 { 393 struct eqos_priv *eqos = bus->priv; 394 u32 val; 395 int ret; 396 397 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 398 mdio_reg); 399 400 ret = eqos_mdio_wait_idle(eqos); 401 if (ret) { 402 pr_err("MDIO not idle at entry"); 403 return ret; 404 } 405 406 val = readl(&eqos->mac_regs->mdio_address); 407 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 408 EQOS_MAC_MDIO_ADDRESS_C45E; 409 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 410 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 411 (eqos->config->config_mac_mdio << 412 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 413 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 414 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 415 EQOS_MAC_MDIO_ADDRESS_GB; 416 writel(val, &eqos->mac_regs->mdio_address); 417 418 udelay(eqos->config->mdio_wait); 419 420 ret = eqos_mdio_wait_idle(eqos); 421 if (ret) { 422 pr_err("MDIO read didn't complete"); 423 return ret; 424 } 425 426 val = readl(&eqos->mac_regs->mdio_data); 427 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 428 429 debug("%s: val=%x\n", __func__, val); 430 431 return val; 432 } 433 434 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 435 int mdio_reg, u16 mdio_val) 436 { 437 struct eqos_priv *eqos = bus->priv; 438 u32 val; 439 int ret; 440 441 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 442 mdio_addr, mdio_reg, mdio_val); 443 444 ret = eqos_mdio_wait_idle(eqos); 445 if (ret) { 446 pr_err("MDIO not idle at entry"); 447 return ret; 448 } 449 450 writel(mdio_val, &eqos->mac_regs->mdio_data); 451 452 val = readl(&eqos->mac_regs->mdio_address); 453 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 454 EQOS_MAC_MDIO_ADDRESS_C45E; 455 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 456 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 457 (eqos->config->config_mac_mdio << 458 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 459 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 460 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 461 EQOS_MAC_MDIO_ADDRESS_GB; 462 writel(val, &eqos->mac_regs->mdio_address); 463 464 udelay(eqos->config->mdio_wait); 465 466 ret = eqos_mdio_wait_idle(eqos); 467 if (ret) { 468 pr_err("MDIO read didn't complete"); 469 return ret; 470 } 471 472 return 0; 473 } 474 475 static int eqos_start_clks_tegra186(struct udevice *dev) 476 { 477 #ifdef CONFIG_CLK 478 struct eqos_priv *eqos = dev_get_priv(dev); 479 int ret; 480 481 debug("%s(dev=%p):\n", __func__, dev); 482 483 ret = clk_enable(&eqos->clk_slave_bus); 484 if (ret < 0) { 485 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 486 goto err; 487 } 488 489 ret = clk_enable(&eqos->clk_master_bus); 490 if (ret < 0) { 491 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 492 goto err_disable_clk_slave_bus; 493 } 494 495 ret = clk_enable(&eqos->clk_rx); 496 if (ret < 0) { 497 pr_err("clk_enable(clk_rx) failed: %d", ret); 498 goto err_disable_clk_master_bus; 499 } 500 501 ret = clk_enable(&eqos->clk_ptp_ref); 502 if (ret < 0) { 503 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 504 goto err_disable_clk_rx; 505 } 506 507 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 508 if (ret < 0) { 509 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 510 goto err_disable_clk_ptp_ref; 511 } 512 513 ret = clk_enable(&eqos->clk_tx); 514 if (ret < 0) { 515 pr_err("clk_enable(clk_tx) failed: %d", ret); 516 goto err_disable_clk_ptp_ref; 517 } 518 #endif 519 520 debug("%s: OK\n", __func__); 521 return 0; 522 523 #ifdef CONFIG_CLK 524 err_disable_clk_ptp_ref: 525 clk_disable(&eqos->clk_ptp_ref); 526 err_disable_clk_rx: 527 clk_disable(&eqos->clk_rx); 528 err_disable_clk_master_bus: 529 clk_disable(&eqos->clk_master_bus); 530 err_disable_clk_slave_bus: 531 clk_disable(&eqos->clk_slave_bus); 532 err: 533 debug("%s: FAILED: %d\n", __func__, ret); 534 return ret; 535 #endif 536 } 537 538 static int eqos_start_clks_stm32(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_master_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 549 goto err; 550 } 551 552 if (clk_valid(&eqos->clk_rx)) { 553 ret = clk_enable(&eqos->clk_rx); 554 if (ret < 0) { 555 pr_err("clk_enable(clk_rx) failed: %d", ret); 556 goto err_disable_clk_master_bus; 557 } 558 } 559 560 if (clk_valid(&eqos->clk_tx)) { 561 ret = clk_enable(&eqos->clk_tx); 562 if (ret < 0) { 563 pr_err("clk_enable(clk_tx) failed: %d", ret); 564 goto err_disable_clk_rx; 565 } 566 } 567 568 if (clk_valid(&eqos->clk_ck)) { 569 ret = clk_enable(&eqos->clk_ck); 570 if (ret < 0) { 571 pr_err("clk_enable(clk_ck) failed: %d", ret); 572 goto err_disable_clk_tx; 573 } 574 } 575 #endif 576 577 debug("%s: OK\n", __func__); 578 return 0; 579 580 #ifdef CONFIG_CLK 581 err_disable_clk_tx: 582 if (clk_valid(&eqos->clk_tx)) 583 clk_disable(&eqos->clk_tx); 584 err_disable_clk_rx: 585 if (clk_valid(&eqos->clk_rx)) 586 clk_disable(&eqos->clk_rx); 587 err_disable_clk_master_bus: 588 clk_disable(&eqos->clk_master_bus); 589 err: 590 debug("%s: FAILED: %d\n", __func__, ret); 591 return ret; 592 #endif 593 } 594 595 static int eqos_start_clks_imx(struct udevice *dev) 596 { 597 return 0; 598 } 599 600 static void eqos_stop_clks_tegra186(struct udevice *dev) 601 { 602 #ifdef CONFIG_CLK 603 struct eqos_priv *eqos = dev_get_priv(dev); 604 605 debug("%s(dev=%p):\n", __func__, dev); 606 607 clk_disable(&eqos->clk_tx); 608 clk_disable(&eqos->clk_ptp_ref); 609 clk_disable(&eqos->clk_rx); 610 clk_disable(&eqos->clk_master_bus); 611 clk_disable(&eqos->clk_slave_bus); 612 #endif 613 614 debug("%s: OK\n", __func__); 615 } 616 617 static void eqos_stop_clks_stm32(struct udevice *dev) 618 { 619 #ifdef CONFIG_CLK 620 struct eqos_priv *eqos = dev_get_priv(dev); 621 622 debug("%s(dev=%p):\n", __func__, dev); 623 624 if (clk_valid(&eqos->clk_tx)) 625 clk_disable(&eqos->clk_tx); 626 if (clk_valid(&eqos->clk_rx)) 627 clk_disable(&eqos->clk_rx); 628 clk_disable(&eqos->clk_master_bus); 629 if (clk_valid(&eqos->clk_ck)) 630 clk_disable(&eqos->clk_ck); 631 #endif 632 633 debug("%s: OK\n", __func__); 634 } 635 636 static void eqos_stop_clks_imx(struct udevice *dev) 637 { 638 /* empty */ 639 } 640 641 static int eqos_start_resets_tegra186(struct udevice *dev) 642 { 643 struct eqos_priv *eqos = dev_get_priv(dev); 644 int ret; 645 646 debug("%s(dev=%p):\n", __func__, dev); 647 648 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 649 if (ret < 0) { 650 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 651 return ret; 652 } 653 654 udelay(2); 655 656 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 657 if (ret < 0) { 658 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 659 return ret; 660 } 661 662 ret = reset_assert(&eqos->reset_ctl); 663 if (ret < 0) { 664 pr_err("reset_assert() failed: %d", ret); 665 return ret; 666 } 667 668 udelay(2); 669 670 ret = reset_deassert(&eqos->reset_ctl); 671 if (ret < 0) { 672 pr_err("reset_deassert() failed: %d", ret); 673 return ret; 674 } 675 676 debug("%s: OK\n", __func__); 677 return 0; 678 } 679 680 static int eqos_start_resets_stm32(struct udevice *dev) 681 { 682 struct eqos_priv *eqos = dev_get_priv(dev); 683 int ret; 684 685 debug("%s(dev=%p):\n", __func__, dev); 686 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 687 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 688 if (ret < 0) { 689 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 690 ret); 691 return ret; 692 } 693 694 udelay(eqos->reset_delays[0]); 695 696 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 697 if (ret < 0) { 698 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 699 ret); 700 return ret; 701 } 702 703 udelay(eqos->reset_delays[1]); 704 705 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 706 if (ret < 0) { 707 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 708 ret); 709 return ret; 710 } 711 712 udelay(eqos->reset_delays[2]); 713 } 714 debug("%s: OK\n", __func__); 715 716 return 0; 717 } 718 719 static int eqos_start_resets_imx(struct udevice *dev) 720 { 721 return 0; 722 } 723 724 static int eqos_stop_resets_tegra186(struct udevice *dev) 725 { 726 struct eqos_priv *eqos = dev_get_priv(dev); 727 728 reset_assert(&eqos->reset_ctl); 729 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 730 731 return 0; 732 } 733 734 static int eqos_stop_resets_stm32(struct udevice *dev) 735 { 736 struct eqos_priv *eqos = dev_get_priv(dev); 737 int ret; 738 739 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 740 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 741 if (ret < 0) { 742 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 743 ret); 744 return ret; 745 } 746 } 747 748 return 0; 749 } 750 751 static int eqos_stop_resets_imx(struct udevice *dev) 752 { 753 return 0; 754 } 755 756 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 757 { 758 struct eqos_priv *eqos = dev_get_priv(dev); 759 int ret; 760 761 debug("%s(dev=%p):\n", __func__, dev); 762 763 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 764 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 765 766 udelay(1); 767 768 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 769 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 770 771 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 772 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 773 if (ret) { 774 pr_err("calibrate didn't start"); 775 goto failed; 776 } 777 778 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 779 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 780 if (ret) { 781 pr_err("calibrate didn't finish"); 782 goto failed; 783 } 784 785 ret = 0; 786 787 failed: 788 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 789 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 790 791 debug("%s: returns %d\n", __func__, ret); 792 793 return ret; 794 } 795 796 static int eqos_disable_calibration_tegra186(struct udevice *dev) 797 { 798 struct eqos_priv *eqos = dev_get_priv(dev); 799 800 debug("%s(dev=%p):\n", __func__, dev); 801 802 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 803 EQOS_AUTO_CAL_CONFIG_ENABLE); 804 805 return 0; 806 } 807 808 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 809 { 810 #ifdef CONFIG_CLK 811 struct eqos_priv *eqos = dev_get_priv(dev); 812 813 return clk_get_rate(&eqos->clk_slave_bus); 814 #else 815 return 0; 816 #endif 817 } 818 819 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 820 { 821 #ifdef CONFIG_CLK 822 struct eqos_priv *eqos = dev_get_priv(dev); 823 824 return clk_get_rate(&eqos->clk_master_bus); 825 #else 826 return 0; 827 #endif 828 } 829 830 __weak u32 imx_get_eqos_csr_clk(void) 831 { 832 return 100 * 1000000; 833 } 834 __weak int imx_eqos_txclk_set_rate(unsigned long rate) 835 { 836 return 0; 837 } 838 839 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 840 { 841 return imx_get_eqos_csr_clk(); 842 } 843 844 static int eqos_calibrate_pads_stm32(struct udevice *dev) 845 { 846 return 0; 847 } 848 849 static int eqos_calibrate_pads_imx(struct udevice *dev) 850 { 851 return 0; 852 } 853 854 static int eqos_disable_calibration_stm32(struct udevice *dev) 855 { 856 return 0; 857 } 858 859 static int eqos_disable_calibration_imx(struct udevice *dev) 860 { 861 return 0; 862 } 863 864 static int eqos_set_full_duplex(struct udevice *dev) 865 { 866 struct eqos_priv *eqos = dev_get_priv(dev); 867 868 debug("%s(dev=%p):\n", __func__, dev); 869 870 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 871 872 return 0; 873 } 874 875 static int eqos_set_half_duplex(struct udevice *dev) 876 { 877 struct eqos_priv *eqos = dev_get_priv(dev); 878 879 debug("%s(dev=%p):\n", __func__, dev); 880 881 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 882 883 /* WAR: Flush TX queue when switching to half-duplex */ 884 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 885 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 886 887 return 0; 888 } 889 890 static int eqos_set_gmii_speed(struct udevice *dev) 891 { 892 struct eqos_priv *eqos = dev_get_priv(dev); 893 894 debug("%s(dev=%p):\n", __func__, dev); 895 896 clrbits_le32(&eqos->mac_regs->configuration, 897 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 898 899 return 0; 900 } 901 902 static int eqos_set_mii_speed_100(struct udevice *dev) 903 { 904 struct eqos_priv *eqos = dev_get_priv(dev); 905 906 debug("%s(dev=%p):\n", __func__, dev); 907 908 setbits_le32(&eqos->mac_regs->configuration, 909 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 910 911 return 0; 912 } 913 914 static int eqos_set_mii_speed_10(struct udevice *dev) 915 { 916 struct eqos_priv *eqos = dev_get_priv(dev); 917 918 debug("%s(dev=%p):\n", __func__, dev); 919 920 clrsetbits_le32(&eqos->mac_regs->configuration, 921 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 922 923 return 0; 924 } 925 926 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 927 { 928 #ifdef CONFIG_CLK 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 ulong rate; 931 int ret; 932 933 debug("%s(dev=%p):\n", __func__, dev); 934 935 switch (eqos->phy->speed) { 936 case SPEED_1000: 937 rate = 125 * 1000 * 1000; 938 break; 939 case SPEED_100: 940 rate = 25 * 1000 * 1000; 941 break; 942 case SPEED_10: 943 rate = 2.5 * 1000 * 1000; 944 break; 945 default: 946 pr_err("invalid speed %d", eqos->phy->speed); 947 return -EINVAL; 948 } 949 950 ret = clk_set_rate(&eqos->clk_tx, rate); 951 if (ret < 0) { 952 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 953 return ret; 954 } 955 #endif 956 957 return 0; 958 } 959 960 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 961 { 962 return 0; 963 } 964 965 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 966 { 967 struct eqos_priv *eqos = dev_get_priv(dev); 968 ulong rate; 969 int ret; 970 971 debug("%s(dev=%p):\n", __func__, dev); 972 973 switch (eqos->phy->speed) { 974 case SPEED_1000: 975 rate = 125 * 1000 * 1000; 976 break; 977 case SPEED_100: 978 rate = 25 * 1000 * 1000; 979 break; 980 case SPEED_10: 981 rate = 2.5 * 1000 * 1000; 982 break; 983 default: 984 pr_err("invalid speed %d", eqos->phy->speed); 985 return -EINVAL; 986 } 987 988 ret = imx_eqos_txclk_set_rate(rate); 989 if (ret < 0) { 990 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 991 return ret; 992 } 993 994 return 0; 995 } 996 997 static int eqos_adjust_link(struct udevice *dev) 998 { 999 struct eqos_priv *eqos = dev_get_priv(dev); 1000 int ret; 1001 bool en_calibration; 1002 1003 debug("%s(dev=%p):\n", __func__, dev); 1004 1005 if (eqos->phy->duplex) 1006 ret = eqos_set_full_duplex(dev); 1007 else 1008 ret = eqos_set_half_duplex(dev); 1009 if (ret < 0) { 1010 pr_err("eqos_set_*_duplex() failed: %d", ret); 1011 return ret; 1012 } 1013 1014 switch (eqos->phy->speed) { 1015 case SPEED_1000: 1016 en_calibration = true; 1017 ret = eqos_set_gmii_speed(dev); 1018 break; 1019 case SPEED_100: 1020 en_calibration = true; 1021 ret = eqos_set_mii_speed_100(dev); 1022 break; 1023 case SPEED_10: 1024 en_calibration = false; 1025 ret = eqos_set_mii_speed_10(dev); 1026 break; 1027 default: 1028 pr_err("invalid speed %d", eqos->phy->speed); 1029 return -EINVAL; 1030 } 1031 if (ret < 0) { 1032 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1033 return ret; 1034 } 1035 1036 if (en_calibration) { 1037 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1038 if (ret < 0) { 1039 pr_err("eqos_calibrate_pads() failed: %d", 1040 ret); 1041 return ret; 1042 } 1043 } else { 1044 ret = eqos->config->ops->eqos_disable_calibration(dev); 1045 if (ret < 0) { 1046 pr_err("eqos_disable_calibration() failed: %d", 1047 ret); 1048 return ret; 1049 } 1050 } 1051 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1052 if (ret < 0) { 1053 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1054 return ret; 1055 } 1056 1057 return 0; 1058 } 1059 1060 int eqos_write_hwaddr(struct udevice *dev) 1061 { 1062 struct eth_pdata *plat = dev_get_platdata(dev); 1063 struct eqos_priv *eqos = dev_get_priv(dev); 1064 uint32_t val; 1065 1066 /* 1067 * This function may be called before start() or after stop(). At that 1068 * time, on at least some configurations of the EQoS HW, all clocks to 1069 * the EQoS HW block will be stopped, and a reset signal applied. If 1070 * any register access is attempted in this state, bus timeouts or CPU 1071 * hangs may occur. This check prevents that. 1072 * 1073 * A simple solution to this problem would be to not implement 1074 * write_hwaddr(), since start() always writes the MAC address into HW 1075 * anyway. However, it is desirable to implement write_hwaddr() to 1076 * support the case of SW that runs subsequent to U-Boot which expects 1077 * the MAC address to already be programmed into the EQoS registers, 1078 * which must happen irrespective of whether the U-Boot user (or 1079 * scripts) actually made use of the EQoS device, and hence 1080 * irrespective of whether start() was ever called. 1081 * 1082 * Note that this requirement by subsequent SW is not valid for 1083 * Tegra186, and is likely not valid for any non-PCI instantiation of 1084 * the EQoS HW block. This function is implemented solely as 1085 * future-proofing with the expectation the driver will eventually be 1086 * ported to some system where the expectation above is true. 1087 */ 1088 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1089 return 0; 1090 1091 /* Update the MAC address */ 1092 val = (plat->enetaddr[5] << 8) | 1093 (plat->enetaddr[4]); 1094 writel(val, &eqos->mac_regs->address0_high); 1095 val = (plat->enetaddr[3] << 24) | 1096 (plat->enetaddr[2] << 16) | 1097 (plat->enetaddr[1] << 8) | 1098 (plat->enetaddr[0]); 1099 writel(val, &eqos->mac_regs->address0_low); 1100 1101 return 0; 1102 } 1103 1104 static int eqos_read_rom_hwaddr(struct udevice *dev) 1105 { 1106 struct eth_pdata *pdata = dev_get_platdata(dev); 1107 1108 #ifdef CONFIG_ARCH_IMX8M 1109 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1110 #endif 1111 return !is_valid_ethaddr(pdata->enetaddr); 1112 } 1113 1114 int eqos_init(struct udevice *dev) 1115 { 1116 struct eqos_priv *eqos = dev_get_priv(dev); 1117 int ret, limit = 10; 1118 ulong rate; 1119 u32 val; 1120 1121 debug("%s(dev=%p):\n", __func__, dev); 1122 1123 if (eqos->config->ops->eqos_start_clks) { 1124 ret = eqos->config->ops->eqos_start_clks(dev); 1125 if (ret < 0) { 1126 pr_err("eqos_start_clks() failed: %d", ret); 1127 goto err; 1128 } 1129 } 1130 1131 ret = eqos->config->ops->eqos_start_resets(dev); 1132 if (ret < 0) { 1133 pr_err("eqos_start_resets() failed: %d", ret); 1134 goto err_stop_clks; 1135 } 1136 1137 udelay(10); 1138 1139 eqos->reg_access_ok = true; 1140 1141 /* DMA SW reset */ 1142 val = readl(&eqos->dma_regs->mode); 1143 val |= EQOS_DMA_MODE_SWR; 1144 writel(val, &eqos->dma_regs->mode); 1145 while (limit--) { 1146 if (!(readl(&eqos->dma_regs->mode) & EQOS_DMA_MODE_SWR)) 1147 break; 1148 mdelay(10); 1149 } 1150 1151 if (limit < 0) { 1152 pr_err("EQOS_DMA_MODE_SWR stuck"); 1153 goto err_stop_resets; 1154 } 1155 1156 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1157 if (ret < 0) { 1158 pr_err("eqos_calibrate_pads() failed: %d", ret); 1159 goto err_stop_resets; 1160 } 1161 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1162 1163 val = (rate / 1000000) - 1; 1164 writel(val, &eqos->mac_regs->us_tic_counter); 1165 1166 /* 1167 * if PHY was already connected and configured, 1168 * don't need to reconnect/reconfigure again 1169 */ 1170 if (!eqos->phy) { 1171 int addr = -1; 1172 #ifdef CONFIG_DM_ETH_PHY 1173 addr = eth_phy_get_addr(dev); 1174 #endif 1175 #ifdef DWC_NET_PHYADDR 1176 addr = DWC_NET_PHYADDR; 1177 #endif 1178 eqos->phy = phy_connect(eqos->mii, addr, dev, 1179 eqos->config->ops->eqos_get_interface(dev)); 1180 if (!eqos->phy) { 1181 pr_err("phy_connect() failed"); 1182 goto err_stop_resets; 1183 } 1184 1185 if (eqos->max_speed) { 1186 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1187 if (ret) { 1188 pr_err("phy_set_supported() failed: %d", ret); 1189 goto err_shutdown_phy; 1190 } 1191 } 1192 1193 ret = phy_config(eqos->phy); 1194 if (ret < 0) { 1195 pr_err("phy_config() failed: %d", ret); 1196 goto err_shutdown_phy; 1197 } 1198 } 1199 1200 ret = phy_startup(eqos->phy); 1201 if (ret < 0) { 1202 pr_err("phy_startup() failed: %d", ret); 1203 goto err_shutdown_phy; 1204 } 1205 1206 if (!eqos->phy->link) { 1207 pr_err("No link"); 1208 goto err_shutdown_phy; 1209 } 1210 1211 ret = eqos_adjust_link(dev); 1212 if (ret < 0) { 1213 pr_err("eqos_adjust_link() failed: %d", ret); 1214 goto err_shutdown_phy; 1215 } 1216 1217 debug("%s: OK\n", __func__); 1218 return 0; 1219 1220 err_shutdown_phy: 1221 phy_shutdown(eqos->phy); 1222 err_stop_resets: 1223 eqos->config->ops->eqos_stop_resets(dev); 1224 err_stop_clks: 1225 if (eqos->config->ops->eqos_stop_clks) 1226 eqos->config->ops->eqos_stop_clks(dev); 1227 err: 1228 pr_err("FAILED: %d", ret); 1229 return ret; 1230 } 1231 1232 void eqos_enable(struct udevice *dev) 1233 { 1234 struct eqos_priv *eqos = dev_get_priv(dev); 1235 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1236 ulong last_rx_desc; 1237 int i; 1238 1239 eqos->tx_desc_idx = 0; 1240 eqos->rx_desc_idx = 0; 1241 1242 /* Configure MTL */ 1243 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1244 1245 /* Enable Store and Forward mode for TX */ 1246 /* Program Tx operating mode */ 1247 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1248 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1249 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1250 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1251 1252 /* Transmit Queue weight */ 1253 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1254 1255 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1256 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1257 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1258 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1259 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1260 1261 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1262 val = readl(&eqos->mac_regs->hw_feature1); 1263 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1264 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1265 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1266 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1267 1268 /* 1269 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1270 * r/tqs is encoded as (n / 256) - 1. 1271 */ 1272 tqs = (128 << tx_fifo_sz) / 256 - 1; 1273 rqs = (128 << rx_fifo_sz) / 256 - 1; 1274 1275 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1276 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1277 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1278 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1279 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1280 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1281 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1282 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1283 1284 /* Flow control used only if each channel gets 4KB or more FIFO */ 1285 if (rqs >= ((4096 / 256) - 1)) { 1286 u32 rfd, rfa; 1287 1288 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1289 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1290 1291 /* 1292 * Set Threshold for Activating Flow Contol space for min 2 1293 * frames ie, (1500 * 1) = 1500 bytes. 1294 * 1295 * Set Threshold for Deactivating Flow Contol for space of 1296 * min 1 frame (frame size 1500bytes) in receive fifo 1297 */ 1298 if (rqs == ((4096 / 256) - 1)) { 1299 /* 1300 * This violates the above formula because of FIFO size 1301 * limit therefore overflow may occur inspite of this. 1302 */ 1303 rfd = 0x3; /* Full-3K */ 1304 rfa = 0x1; /* Full-1.5K */ 1305 } else if (rqs == ((8192 / 256) - 1)) { 1306 rfd = 0x6; /* Full-4K */ 1307 rfa = 0xa; /* Full-6K */ 1308 } else if (rqs == ((16384 / 256) - 1)) { 1309 rfd = 0x6; /* Full-4K */ 1310 rfa = 0x12; /* Full-10K */ 1311 } else { 1312 rfd = 0x6; /* Full-4K */ 1313 rfa = 0x1E; /* Full-16K */ 1314 } 1315 1316 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1317 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1318 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1319 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1320 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1321 (rfd << 1322 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1323 (rfa << 1324 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1325 } 1326 1327 /* Configure MAC */ 1328 1329 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1330 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1331 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1332 eqos->config->config_mac << 1333 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1334 1335 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1336 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1337 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1338 0x2 << 1339 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1340 1341 /* Multicast and Broadcast Queue Enable */ 1342 setbits_le32(&eqos->mac_regs->unused_0a4, 1343 0x00100000); 1344 /* enable promise mode */ 1345 setbits_le32(&eqos->mac_regs->unused_004[1], 1346 0x1); 1347 1348 /* Set TX flow control parameters */ 1349 /* Set Pause Time */ 1350 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1351 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1352 /* Assign priority for TX flow control */ 1353 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1354 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1355 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1356 /* Assign priority for RX flow control */ 1357 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1358 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1359 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1360 /* Enable flow control */ 1361 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1362 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1363 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1364 EQOS_MAC_RX_FLOW_CTRL_RFE); 1365 1366 clrsetbits_le32(&eqos->mac_regs->configuration, 1367 EQOS_MAC_CONFIGURATION_GPSLCE | 1368 EQOS_MAC_CONFIGURATION_WD | 1369 EQOS_MAC_CONFIGURATION_JD | 1370 EQOS_MAC_CONFIGURATION_JE, 1371 EQOS_MAC_CONFIGURATION_CST | 1372 EQOS_MAC_CONFIGURATION_ACS); 1373 1374 eqos_write_hwaddr(dev); 1375 1376 /* Configure DMA */ 1377 1378 /* Enable OSP mode */ 1379 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1380 EQOS_DMA_CH0_TX_CONTROL_OSP); 1381 1382 /* RX buffer size. Must be a multiple of bus width */ 1383 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1384 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1385 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1386 EQOS_MAX_PACKET_SIZE << 1387 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1388 1389 setbits_le32(&eqos->dma_regs->ch0_control, 1390 EQOS_DMA_CH0_CONTROL_PBLX8); 1391 1392 /* 1393 * Burst length must be < 1/2 FIFO size. 1394 * FIFO size in tqs is encoded as (n / 256) - 1. 1395 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1396 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 1397 */ 1398 pbl = tqs + 1; 1399 if (pbl > 32) 1400 pbl = 32; 1401 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1402 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1403 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1404 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1405 1406 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1407 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1408 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1409 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1410 1411 /* DMA performance configuration */ 1412 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1413 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1414 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1415 writel(val, &eqos->dma_regs->sysbus_mode); 1416 1417 /* Set up descriptors */ 1418 1419 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1420 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1421 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1422 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1423 (i * EQOS_MAX_PACKET_SIZE)); 1424 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1425 mb(); 1426 eqos->config->ops->eqos_flush_desc(rx_desc); 1427 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1428 (i * EQOS_MAX_PACKET_SIZE), 1429 EQOS_MAX_PACKET_SIZE); 1430 } 1431 1432 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1433 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1434 writel(EQOS_DESCRIPTORS_TX - 1, 1435 &eqos->dma_regs->ch0_txdesc_ring_length); 1436 1437 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1438 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1439 writel(EQOS_DESCRIPTORS_RX - 1, 1440 &eqos->dma_regs->ch0_rxdesc_ring_length); 1441 1442 /* Enable everything */ 1443 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1444 EQOS_DMA_CH0_TX_CONTROL_ST); 1445 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1446 EQOS_DMA_CH0_RX_CONTROL_SR); 1447 setbits_le32(&eqos->mac_regs->configuration, 1448 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1449 1450 /* TX tail pointer not written until we need to TX a packet */ 1451 /* 1452 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1453 * first descriptor, implying all descriptors were available. However, 1454 * that's not distinguishable from none of the descriptors being 1455 * available. 1456 */ 1457 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1458 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1459 1460 eqos->started = true; 1461 } 1462 1463 static int eqos_start(struct udevice *dev) 1464 { 1465 int ret; 1466 1467 ret = eqos_init(dev); 1468 if (ret) 1469 return ret; 1470 1471 eqos_enable(dev); 1472 1473 return 0; 1474 } 1475 1476 void eqos_stop(struct udevice *dev) 1477 { 1478 struct eqos_priv *eqos = dev_get_priv(dev); 1479 int i; 1480 1481 debug("%s(dev=%p):\n", __func__, dev); 1482 1483 if (!eqos->started) 1484 return; 1485 eqos->started = false; 1486 eqos->reg_access_ok = false; 1487 1488 /* Disable TX DMA */ 1489 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1490 EQOS_DMA_CH0_TX_CONTROL_ST); 1491 1492 /* Wait for TX all packets to drain out of MTL */ 1493 for (i = 0; i < 1000000; i++) { 1494 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1495 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1496 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1497 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1498 if ((trcsts != 1) && (!txqsts)) 1499 break; 1500 } 1501 1502 /* Turn off MAC TX and RX */ 1503 clrbits_le32(&eqos->mac_regs->configuration, 1504 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1505 1506 /* Wait for all RX packets to drain out of MTL */ 1507 for (i = 0; i < 1000000; i++) { 1508 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1509 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1510 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1511 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1512 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1513 if ((!prxq) && (!rxqsts)) 1514 break; 1515 } 1516 1517 /* Turn off RX DMA */ 1518 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1519 EQOS_DMA_CH0_RX_CONTROL_SR); 1520 1521 if (eqos->phy) { 1522 phy_shutdown(eqos->phy); 1523 } 1524 eqos->config->ops->eqos_stop_resets(dev); 1525 if (eqos->config->ops->eqos_stop_clks) 1526 eqos->config->ops->eqos_stop_clks(dev); 1527 1528 debug("%s: OK\n", __func__); 1529 } 1530 1531 int eqos_send(struct udevice *dev, void *packet, int length) 1532 { 1533 struct eqos_priv *eqos = dev_get_priv(dev); 1534 struct eqos_desc *tx_desc; 1535 int i; 1536 1537 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1538 length); 1539 1540 memcpy(eqos->tx_dma_buf, packet, length); 1541 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1542 1543 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1544 eqos->tx_desc_idx++; 1545 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1546 1547 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1548 tx_desc->des1 = 0; 1549 tx_desc->des2 = length; 1550 /* 1551 * Make sure that if HW sees the _OWN write below, it will see all the 1552 * writes to the rest of the descriptor too. 1553 */ 1554 mb(); 1555 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1556 eqos->config->ops->eqos_flush_desc(tx_desc); 1557 1558 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1559 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1560 1561 for (i = 0; i < 1000000; i++) { 1562 eqos->config->ops->eqos_inval_desc(tx_desc); 1563 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1564 return 0; 1565 udelay(1); 1566 } 1567 1568 debug("%s: TX timeout\n", __func__); 1569 1570 return -ETIMEDOUT; 1571 } 1572 1573 int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1574 { 1575 struct eqos_priv *eqos = dev_get_priv(dev); 1576 struct eqos_desc *rx_desc; 1577 int length; 1578 1579 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1580 1581 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1582 eqos->config->ops->eqos_inval_desc(rx_desc); 1583 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1584 debug("%s: RX packet not available\n", __func__); 1585 return -EAGAIN; 1586 } 1587 1588 *packetp = eqos->rx_dma_buf + 1589 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1590 length = rx_desc->des3 & 0x7fff; 1591 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1592 1593 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1594 1595 return length; 1596 } 1597 1598 int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1599 { 1600 struct eqos_priv *eqos = dev_get_priv(dev); 1601 uchar *packet_expected; 1602 struct eqos_desc *rx_desc; 1603 1604 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1605 1606 packet_expected = eqos->rx_dma_buf + 1607 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1608 if (packet != packet_expected) { 1609 debug("%s: Unexpected packet (expected %p)\n", __func__, 1610 packet_expected); 1611 return -EINVAL; 1612 } 1613 1614 eqos->config->ops->eqos_inval_buffer(packet, length); 1615 1616 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1617 1618 rx_desc->des0 = 0; 1619 mb(); 1620 eqos->config->ops->eqos_flush_desc(rx_desc); 1621 eqos->config->ops->eqos_inval_buffer(packet, length); 1622 rx_desc->des0 = (u32)(ulong)packet; 1623 rx_desc->des1 = 0; 1624 rx_desc->des2 = 0; 1625 /* 1626 * Make sure that if HW sees the _OWN write below, it will see all the 1627 * writes to the rest of the descriptor too. 1628 */ 1629 mb(); 1630 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1631 eqos->config->ops->eqos_flush_desc(rx_desc); 1632 1633 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1634 1635 eqos->rx_desc_idx++; 1636 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1637 1638 return 0; 1639 } 1640 1641 static int eqos_probe_resources_core(struct udevice *dev) 1642 { 1643 struct eqos_priv *eqos = dev_get_priv(dev); 1644 int ret; 1645 1646 debug("%s(dev=%p):\n", __func__, dev); 1647 1648 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1649 EQOS_DESCRIPTORS_RX); 1650 if (!eqos->descs) { 1651 debug("%s: eqos_alloc_descs() failed\n", __func__); 1652 ret = -ENOMEM; 1653 goto err; 1654 } 1655 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1656 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1657 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1658 eqos->rx_descs); 1659 1660 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1661 if (!eqos->tx_dma_buf) { 1662 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1663 ret = -ENOMEM; 1664 goto err_free_descs; 1665 } 1666 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1667 1668 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1669 if (!eqos->rx_dma_buf) { 1670 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1671 ret = -ENOMEM; 1672 goto err_free_tx_dma_buf; 1673 } 1674 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1675 1676 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1677 if (!eqos->rx_pkt) { 1678 debug("%s: malloc(rx_pkt) failed\n", __func__); 1679 ret = -ENOMEM; 1680 goto err_free_rx_dma_buf; 1681 } 1682 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1683 1684 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1685 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1686 1687 debug("%s: OK\n", __func__); 1688 return 0; 1689 1690 err_free_rx_dma_buf: 1691 free(eqos->rx_dma_buf); 1692 err_free_tx_dma_buf: 1693 free(eqos->tx_dma_buf); 1694 err_free_descs: 1695 eqos_free_descs(eqos->descs); 1696 err: 1697 1698 debug("%s: returns %d\n", __func__, ret); 1699 return ret; 1700 } 1701 1702 static int eqos_remove_resources_core(struct udevice *dev) 1703 { 1704 struct eqos_priv *eqos = dev_get_priv(dev); 1705 1706 debug("%s(dev=%p):\n", __func__, dev); 1707 1708 free(eqos->rx_pkt); 1709 free(eqos->rx_dma_buf); 1710 free(eqos->tx_dma_buf); 1711 eqos_free_descs(eqos->descs); 1712 1713 debug("%s: OK\n", __func__); 1714 return 0; 1715 } 1716 1717 static int eqos_probe_resources_tegra186(struct udevice *dev) 1718 { 1719 struct eqos_priv *eqos = dev_get_priv(dev); 1720 int ret; 1721 1722 debug("%s(dev=%p):\n", __func__, dev); 1723 1724 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1725 if (ret) { 1726 pr_err("reset_get_by_name(rst) failed: %d", ret); 1727 return ret; 1728 } 1729 1730 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1731 &eqos->phy_reset_gpio, 1732 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1733 if (ret) { 1734 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1735 goto err_free_reset_eqos; 1736 } 1737 1738 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1739 if (ret) { 1740 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1741 goto err_free_gpio_phy_reset; 1742 } 1743 1744 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1745 if (ret) { 1746 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1747 goto err_free_clk_slave_bus; 1748 } 1749 1750 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1751 if (ret) { 1752 pr_err("clk_get_by_name(rx) failed: %d", ret); 1753 goto err_free_clk_master_bus; 1754 } 1755 1756 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1757 if (ret) { 1758 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1759 goto err_free_clk_rx; 1760 return ret; 1761 } 1762 1763 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1764 if (ret) { 1765 pr_err("clk_get_by_name(tx) failed: %d", ret); 1766 goto err_free_clk_ptp_ref; 1767 } 1768 1769 debug("%s: OK\n", __func__); 1770 return 0; 1771 1772 err_free_clk_ptp_ref: 1773 clk_free(&eqos->clk_ptp_ref); 1774 err_free_clk_rx: 1775 clk_free(&eqos->clk_rx); 1776 err_free_clk_master_bus: 1777 clk_free(&eqos->clk_master_bus); 1778 err_free_clk_slave_bus: 1779 clk_free(&eqos->clk_slave_bus); 1780 err_free_gpio_phy_reset: 1781 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1782 err_free_reset_eqos: 1783 reset_free(&eqos->reset_ctl); 1784 1785 debug("%s: returns %d\n", __func__, ret); 1786 return ret; 1787 } 1788 1789 /* board-specific Ethernet Interface initializations. */ 1790 __weak int board_interface_eth_init(struct udevice *dev, 1791 phy_interface_t interface_type) 1792 { 1793 return 0; 1794 } 1795 1796 static int eqos_probe_resources_stm32(struct udevice *dev) 1797 { 1798 struct eqos_priv *eqos = dev_get_priv(dev); 1799 int ret; 1800 phy_interface_t interface; 1801 struct ofnode_phandle_args phandle_args; 1802 1803 debug("%s(dev=%p):\n", __func__, dev); 1804 1805 interface = eqos->config->ops->eqos_get_interface(dev); 1806 1807 if (interface == PHY_INTERFACE_MODE_NONE) { 1808 pr_err("Invalid PHY interface\n"); 1809 return -EINVAL; 1810 } 1811 1812 ret = board_interface_eth_init(dev, interface); 1813 if (ret) 1814 return -EINVAL; 1815 1816 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1817 1818 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1819 if (ret) { 1820 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1821 return ret; 1822 } 1823 1824 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1825 if (ret) 1826 pr_warn("clk_get_by_name(rx) failed: %d", ret); 1827 1828 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1829 if (ret) 1830 pr_warn("clk_get_by_name(tx) failed: %d", ret); 1831 1832 /* Get ETH_CLK clocks (optional) */ 1833 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1834 if (ret) 1835 pr_warn("No phy clock provided %d", ret); 1836 1837 eqos->phyaddr = -1; 1838 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1839 &phandle_args); 1840 if (!ret) { 1841 /* search "reset-gpios" in phy node */ 1842 ret = gpio_request_by_name_nodev(phandle_args.node, 1843 "reset-gpios", 0, 1844 &eqos->phy_reset_gpio, 1845 GPIOD_IS_OUT | 1846 GPIOD_IS_OUT_ACTIVE); 1847 if (ret) 1848 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1849 ret); 1850 else 1851 eqos->reset_delays[1] = 2; 1852 1853 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1854 "reg", -1); 1855 } 1856 1857 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1858 int reset_flags = GPIOD_IS_OUT; 1859 1860 if (dev_read_bool(dev, "snps,reset-active-low")) 1861 reset_flags |= GPIOD_ACTIVE_LOW; 1862 1863 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1864 &eqos->phy_reset_gpio, reset_flags); 1865 if (ret == 0) 1866 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1867 eqos->reset_delays, 3); 1868 else 1869 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1870 ret); 1871 } 1872 1873 debug("%s: OK\n", __func__); 1874 return 0; 1875 } 1876 1877 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1878 { 1879 const char *phy_mode; 1880 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1881 1882 debug("%s(dev=%p):\n", __func__, dev); 1883 1884 phy_mode = dev_read_string(dev, "phy-mode"); 1885 if (phy_mode) 1886 interface = phy_get_interface_by_name(phy_mode); 1887 1888 return interface; 1889 } 1890 1891 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1892 { 1893 return PHY_INTERFACE_MODE_MII; 1894 } 1895 1896 static int eqos_probe_resources_imx(struct udevice *dev) 1897 { 1898 struct eqos_priv *eqos = dev_get_priv(dev); 1899 phy_interface_t interface; 1900 1901 debug("%s(dev=%p):\n", __func__, dev); 1902 1903 interface = eqos->config->ops->eqos_get_interface(dev); 1904 1905 if (interface == PHY_INTERFACE_MODE_NONE) { 1906 pr_err("Invalid PHY interface\n"); 1907 return -EINVAL; 1908 } 1909 1910 debug("%s: OK\n", __func__); 1911 return 0; 1912 } 1913 1914 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1915 { 1916 const char *phy_mode; 1917 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1918 1919 debug("%s(dev=%p):\n", __func__, dev); 1920 1921 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1922 NULL); 1923 if (phy_mode) 1924 interface = phy_get_interface_by_name(phy_mode); 1925 1926 return interface; 1927 } 1928 1929 static int eqos_remove_resources_tegra186(struct udevice *dev) 1930 { 1931 struct eqos_priv *eqos = dev_get_priv(dev); 1932 1933 debug("%s(dev=%p):\n", __func__, dev); 1934 1935 #ifdef CONFIG_CLK 1936 clk_free(&eqos->clk_tx); 1937 clk_free(&eqos->clk_ptp_ref); 1938 clk_free(&eqos->clk_rx); 1939 clk_free(&eqos->clk_slave_bus); 1940 clk_free(&eqos->clk_master_bus); 1941 #endif 1942 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1943 reset_free(&eqos->reset_ctl); 1944 1945 debug("%s: OK\n", __func__); 1946 return 0; 1947 } 1948 1949 static int eqos_remove_resources_stm32(struct udevice *dev) 1950 { 1951 #ifdef CONFIG_CLK 1952 struct eqos_priv *eqos = dev_get_priv(dev); 1953 1954 debug("%s(dev=%p):\n", __func__, dev); 1955 1956 if (clk_valid(&eqos->clk_tx)) 1957 clk_free(&eqos->clk_tx); 1958 if (clk_valid(&eqos->clk_rx)) 1959 clk_free(&eqos->clk_rx); 1960 clk_free(&eqos->clk_master_bus); 1961 if (clk_valid(&eqos->clk_ck)) 1962 clk_free(&eqos->clk_ck); 1963 #endif 1964 1965 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 1966 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1967 1968 debug("%s: OK\n", __func__); 1969 return 0; 1970 } 1971 1972 static int eqos_remove_resources_imx(struct udevice *dev) 1973 { 1974 return 0; 1975 } 1976 1977 int eqos_probe(struct udevice *dev) 1978 { 1979 struct eqos_priv *eqos = dev_get_priv(dev); 1980 int ret; 1981 1982 debug("%s(dev=%p):\n", __func__, dev); 1983 1984 eqos->dev = dev; 1985 eqos->config = (void *)dev_get_driver_data(dev); 1986 1987 eqos->regs = dev_read_addr(dev); 1988 if (eqos->regs == FDT_ADDR_T_NONE) { 1989 pr_err("dev_read_addr() failed"); 1990 return -ENODEV; 1991 } 1992 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 1993 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 1994 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 1995 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 1996 1997 ret = eqos_probe_resources_core(dev); 1998 if (ret < 0) { 1999 pr_err("eqos_probe_resources_core() failed: %d", ret); 2000 return ret; 2001 } 2002 2003 ret = eqos->config->ops->eqos_probe_resources(dev); 2004 if (ret < 0) { 2005 pr_err("eqos_probe_resources() failed: %d", ret); 2006 goto err_remove_resources_core; 2007 } 2008 2009 #ifdef CONFIG_DM_ETH_PHY 2010 eqos->mii = eth_phy_get_mdio_bus(dev); 2011 #endif 2012 if (!eqos->mii) { 2013 eqos->mii = mdio_alloc(); 2014 if (!eqos->mii) { 2015 pr_err("mdio_alloc() failed"); 2016 ret = -ENOMEM; 2017 goto err_remove_resources_tegra; 2018 } 2019 eqos->mii->read = eqos_mdio_read; 2020 eqos->mii->write = eqos_mdio_write; 2021 eqos->mii->priv = eqos; 2022 strcpy(eqos->mii->name, dev->name); 2023 2024 ret = mdio_register(eqos->mii); 2025 if (ret < 0) { 2026 pr_err("mdio_register() failed: %d", ret); 2027 goto err_free_mdio; 2028 } 2029 } 2030 2031 #ifdef CONFIG_DM_ETH_PHY 2032 eth_phy_set_mdio_bus(dev, eqos->mii); 2033 #endif 2034 2035 debug("%s: OK\n", __func__); 2036 return 0; 2037 2038 err_free_mdio: 2039 mdio_free(eqos->mii); 2040 err_remove_resources_tegra: 2041 eqos->config->ops->eqos_remove_resources(dev); 2042 err_remove_resources_core: 2043 eqos_remove_resources_core(dev); 2044 2045 debug("%s: returns %d\n", __func__, ret); 2046 return ret; 2047 } 2048 2049 static int eqos_remove(struct udevice *dev) 2050 { 2051 struct eqos_priv *eqos = dev_get_priv(dev); 2052 2053 debug("%s(dev=%p):\n", __func__, dev); 2054 2055 mdio_unregister(eqos->mii); 2056 mdio_free(eqos->mii); 2057 eqos->config->ops->eqos_remove_resources(dev); 2058 2059 eqos_probe_resources_core(dev); 2060 2061 debug("%s: OK\n", __func__); 2062 return 0; 2063 } 2064 2065 static const struct eth_ops eqos_ops = { 2066 .start = eqos_start, 2067 .stop = eqos_stop, 2068 .send = eqos_send, 2069 .recv = eqos_recv, 2070 .free_pkt = eqos_free_pkt, 2071 .write_hwaddr = eqos_write_hwaddr, 2072 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2073 }; 2074 2075 static struct eqos_ops eqos_tegra186_ops = { 2076 .eqos_inval_desc = eqos_inval_desc_tegra186, 2077 .eqos_flush_desc = eqos_flush_desc_tegra186, 2078 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2079 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2080 .eqos_probe_resources = eqos_probe_resources_tegra186, 2081 .eqos_remove_resources = eqos_remove_resources_tegra186, 2082 .eqos_stop_resets = eqos_stop_resets_tegra186, 2083 .eqos_start_resets = eqos_start_resets_tegra186, 2084 .eqos_stop_clks = eqos_stop_clks_tegra186, 2085 .eqos_start_clks = eqos_start_clks_tegra186, 2086 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2087 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2088 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2089 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2090 .eqos_get_interface = eqos_get_interface_tegra186 2091 }; 2092 2093 static const struct eqos_config eqos_tegra186_config = { 2094 .reg_access_always_ok = false, 2095 .mdio_wait = 10, 2096 .swr_wait = 10, 2097 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2098 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2099 .ops = &eqos_tegra186_ops 2100 }; 2101 2102 static struct eqos_ops eqos_stm32_ops = { 2103 .eqos_inval_desc = eqos_inval_desc_generic, 2104 .eqos_flush_desc = eqos_flush_desc_generic, 2105 .eqos_inval_buffer = eqos_inval_buffer_generic, 2106 .eqos_flush_buffer = eqos_flush_buffer_generic, 2107 .eqos_probe_resources = eqos_probe_resources_stm32, 2108 .eqos_remove_resources = eqos_remove_resources_stm32, 2109 .eqos_stop_resets = eqos_stop_resets_stm32, 2110 .eqos_start_resets = eqos_start_resets_stm32, 2111 .eqos_stop_clks = eqos_stop_clks_stm32, 2112 .eqos_start_clks = eqos_start_clks_stm32, 2113 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2114 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2115 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2116 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2117 .eqos_get_interface = eqos_get_interface_stm32 2118 }; 2119 2120 static const struct eqos_config eqos_stm32_config = { 2121 .reg_access_always_ok = false, 2122 .mdio_wait = 10000, 2123 .swr_wait = 50, 2124 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2125 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2126 .ops = &eqos_stm32_ops 2127 }; 2128 2129 static struct eqos_ops eqos_imx_ops = { 2130 .eqos_inval_desc = eqos_inval_desc_generic, 2131 .eqos_flush_desc = eqos_flush_desc_generic, 2132 .eqos_inval_buffer = eqos_inval_buffer_generic, 2133 .eqos_flush_buffer = eqos_flush_buffer_generic, 2134 .eqos_probe_resources = eqos_probe_resources_imx, 2135 .eqos_remove_resources = eqos_remove_resources_imx, 2136 .eqos_stop_resets = eqos_stop_resets_imx, 2137 .eqos_start_resets = eqos_start_resets_imx, 2138 .eqos_stop_clks = eqos_stop_clks_imx, 2139 .eqos_start_clks = eqos_start_clks_imx, 2140 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2141 .eqos_disable_calibration = eqos_disable_calibration_imx, 2142 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2143 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, 2144 .eqos_get_interface = eqos_get_interface_imx 2145 }; 2146 2147 struct eqos_config eqos_imx_config = { 2148 .reg_access_always_ok = false, 2149 .mdio_wait = 10000, 2150 .swr_wait = 50, 2151 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2152 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2153 .ops = &eqos_imx_ops 2154 }; 2155 2156 struct eqos_ops eqos_rockchip_ops = { 2157 .eqos_inval_desc = eqos_inval_desc_generic, 2158 .eqos_flush_desc = eqos_flush_desc_generic, 2159 .eqos_inval_buffer = eqos_inval_buffer_generic, 2160 .eqos_flush_buffer = eqos_flush_buffer_generic, 2161 .eqos_probe_resources = eqos_probe_resources_stm32, 2162 .eqos_remove_resources = eqos_remove_resources_stm32, 2163 .eqos_stop_resets = eqos_stop_resets_stm32, 2164 .eqos_start_resets = eqos_start_resets_stm32, 2165 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2166 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2167 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2168 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2169 .eqos_get_interface = eqos_get_interface_stm32 2170 }; 2171 2172 static const struct udevice_id eqos_ids[] = { 2173 { 2174 .compatible = "nvidia,tegra186-eqos", 2175 .data = (ulong)&eqos_tegra186_config 2176 }, 2177 { 2178 .compatible = "snps,dwmac-4.20a", 2179 .data = (ulong)&eqos_stm32_config 2180 }, 2181 { 2182 .compatible = "fsl,imx-eqos", 2183 .data = (ulong)&eqos_imx_config 2184 }, 2185 2186 { } 2187 }; 2188 2189 U_BOOT_DRIVER(eth_eqos) = { 2190 .name = "eth_eqos", 2191 .id = UCLASS_ETH, 2192 .of_match = of_match_ptr(eqos_ids), 2193 .probe = eqos_probe, 2194 .remove = eqos_remove, 2195 .ops = &eqos_ops, 2196 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2197 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2198 }; 2199