1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/io.h> 42 #include <eth_phy.h> 43 #ifdef CONFIG_ARCH_IMX8M 44 #include <asm/arch/clock.h> 45 #include <asm/mach-imx/sys_proto.h> 46 #endif 47 #include "dwc_eth_qos.h" 48 49 /* Core registers */ 50 51 #define EQOS_MAC_REGS_BASE 0x000 52 struct eqos_mac_regs { 53 uint32_t configuration; /* 0x000 */ 54 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 55 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 56 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 57 uint32_t rx_flow_ctrl; /* 0x090 */ 58 uint32_t unused_094; /* 0x094 */ 59 uint32_t txq_prty_map0; /* 0x098 */ 60 uint32_t unused_09c; /* 0x09c */ 61 uint32_t rxq_ctrl0; /* 0x0a0 */ 62 uint32_t unused_0a4; /* 0x0a4 */ 63 uint32_t rxq_ctrl2; /* 0x0a8 */ 64 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 65 uint32_t us_tic_counter; /* 0x0dc */ 66 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 67 uint32_t hw_feature0; /* 0x11c */ 68 uint32_t hw_feature1; /* 0x120 */ 69 uint32_t hw_feature2; /* 0x124 */ 70 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 71 uint32_t mdio_address; /* 0x200 */ 72 uint32_t mdio_data; /* 0x204 */ 73 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 74 uint32_t address0_high; /* 0x300 */ 75 uint32_t address0_low; /* 0x304 */ 76 }; 77 78 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 79 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 80 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 81 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 82 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 83 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 84 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 85 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 86 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 87 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 88 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 89 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 90 91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 94 95 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 96 97 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 99 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 102 103 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 104 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 105 106 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 107 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 108 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 109 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 110 111 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 112 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 113 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 114 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 115 116 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 117 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 118 119 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 120 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 121 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 122 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 123 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 124 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 125 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 126 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 127 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 128 129 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 130 131 #define EQOS_MTL_REGS_BASE 0xd00 132 struct eqos_mtl_regs { 133 uint32_t txq0_operation_mode; /* 0xd00 */ 134 uint32_t unused_d04; /* 0xd04 */ 135 uint32_t txq0_debug; /* 0xd08 */ 136 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 137 uint32_t txq0_quantum_weight; /* 0xd18 */ 138 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 139 uint32_t rxq0_operation_mode; /* 0xd30 */ 140 uint32_t unused_d34; /* 0xd34 */ 141 uint32_t rxq0_debug; /* 0xd38 */ 142 }; 143 144 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 145 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 146 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 147 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 148 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 151 152 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 153 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 154 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 155 156 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 157 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 158 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 159 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 160 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 166 167 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 168 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 169 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 170 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 171 172 #define EQOS_DMA_REGS_BASE 0x1000 173 struct eqos_dma_regs { 174 uint32_t mode; /* 0x1000 */ 175 uint32_t sysbus_mode; /* 0x1004 */ 176 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 177 uint32_t ch0_control; /* 0x1100 */ 178 uint32_t ch0_tx_control; /* 0x1104 */ 179 uint32_t ch0_rx_control; /* 0x1108 */ 180 uint32_t unused_110c; /* 0x110c */ 181 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 182 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 183 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 184 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 185 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 186 uint32_t unused_1124; /* 0x1124 */ 187 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 188 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 189 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 190 }; 191 192 #define EQOS_DMA_MODE_SWR BIT(0) 193 194 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 195 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 196 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 197 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 198 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 199 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 200 201 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 202 203 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 204 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 205 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 206 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 207 208 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 209 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 210 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 211 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 212 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 213 214 /* These registers are Tegra186-specific */ 215 #define EQOS_TEGRA186_REGS_BASE 0x8800 216 struct eqos_tegra186_regs { 217 uint32_t sdmemcomppadctrl; /* 0x8800 */ 218 uint32_t auto_cal_config; /* 0x8804 */ 219 uint32_t unused_8808; /* 0x8808 */ 220 uint32_t auto_cal_status; /* 0x880c */ 221 }; 222 223 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 224 225 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 226 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 227 228 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 229 230 /* Descriptors */ 231 232 #define EQOS_DESCRIPTOR_WORDS 4 233 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 234 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 235 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 236 #define EQOS_DESCRIPTORS_TX 4 237 #define EQOS_DESCRIPTORS_RX 4 238 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 239 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 240 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 241 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 242 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 243 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 244 245 /* 246 * Warn if the cache-line size is larger than the descriptor size. In such 247 * cases the driver will likely fail because the CPU needs to flush the cache 248 * when requeuing RX buffers, therefore descriptors written by the hardware 249 * may be discarded. Architectures with full IO coherence, such as x86, do not 250 * experience this issue, and hence are excluded from this condition. 251 * 252 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 253 * the driver to allocate descriptors from a pool of non-cached memory. 254 */ 255 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 256 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 257 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 258 #warning Cache line size is larger than descriptor size 259 #endif 260 #endif 261 262 struct eqos_desc { 263 u32 des0; 264 u32 des1; 265 u32 des2; 266 u32 des3; 267 }; 268 269 #define EQOS_DESC3_OWN BIT(31) 270 #define EQOS_DESC3_FD BIT(29) 271 #define EQOS_DESC3_LD BIT(28) 272 #define EQOS_DESC3_BUF1V BIT(24) 273 274 /* 275 * TX and RX descriptors are 16 bytes. This causes problems with the cache 276 * maintenance on CPUs where the cache-line size exceeds the size of these 277 * descriptors. What will happen is that when the driver receives a packet 278 * it will be immediately requeued for the hardware to reuse. The CPU will 279 * therefore need to flush the cache-line containing the descriptor, which 280 * will cause all other descriptors in the same cache-line to be flushed 281 * along with it. If one of those descriptors had been written to by the 282 * device those changes (and the associated packet) will be lost. 283 * 284 * To work around this, we make use of non-cached memory if available. If 285 * descriptors are mapped uncached there's no need to manually flush them 286 * or invalidate them. 287 * 288 * Note that this only applies to descriptors. The packet data buffers do 289 * not have the same constraints since they are 1536 bytes large, so they 290 * are unlikely to share cache-lines. 291 */ 292 static void *eqos_alloc_descs(unsigned int num) 293 { 294 #ifdef CONFIG_SYS_NONCACHED_MEMORY 295 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 296 EQOS_DESCRIPTOR_ALIGN); 297 #else 298 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 299 #endif 300 } 301 302 static void eqos_free_descs(void *descs) 303 { 304 #ifdef CONFIG_SYS_NONCACHED_MEMORY 305 /* FIXME: noncached_alloc() has no opposite */ 306 #else 307 free(descs); 308 #endif 309 } 310 311 #ifdef CONFIG_QOS_FULL 312 static void eqos_inval_desc_tegra186(void *desc) 313 { 314 #ifndef CONFIG_SYS_NONCACHED_MEMORY 315 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 316 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 317 ARCH_DMA_MINALIGN); 318 319 invalidate_dcache_range(start, end); 320 #endif 321 } 322 #endif 323 324 static void eqos_inval_desc_generic(void *desc) 325 { 326 #ifndef CONFIG_SYS_NONCACHED_MEMORY 327 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 328 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 329 ARCH_DMA_MINALIGN); 330 331 invalidate_dcache_range(start, end); 332 #endif 333 } 334 335 #ifdef CONFIG_QOS_FULL 336 static void eqos_flush_desc_tegra186(void *desc) 337 { 338 #ifndef CONFIG_SYS_NONCACHED_MEMORY 339 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 340 #endif 341 } 342 #endif 343 344 static void eqos_flush_desc_generic(void *desc) 345 { 346 #ifndef CONFIG_SYS_NONCACHED_MEMORY 347 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 348 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 349 ARCH_DMA_MINALIGN); 350 351 flush_dcache_range(start, end); 352 #endif 353 } 354 355 #ifdef CONFIG_QOS_FULL 356 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 357 { 358 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 359 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 360 361 invalidate_dcache_range(start, end); 362 } 363 #endif 364 365 static void eqos_inval_buffer_generic(void *buf, size_t size) 366 { 367 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 368 unsigned long end = roundup((unsigned long)buf + size, 369 ARCH_DMA_MINALIGN); 370 371 invalidate_dcache_range(start, end); 372 } 373 374 #ifdef CONFIG_QOS_FULL 375 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 376 { 377 flush_cache((unsigned long)buf, size); 378 } 379 #endif 380 381 static void eqos_flush_buffer_generic(void *buf, size_t size) 382 { 383 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 384 unsigned long end = roundup((unsigned long)buf + size, 385 ARCH_DMA_MINALIGN); 386 387 flush_dcache_range(start, end); 388 } 389 390 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 391 { 392 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 393 EQOS_MAC_MDIO_ADDRESS_GB, false, 394 1000000, true); 395 } 396 397 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 398 int mdio_reg) 399 { 400 struct eqos_priv *eqos = bus->priv; 401 u32 val; 402 int ret; 403 404 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 405 mdio_reg); 406 407 ret = eqos_mdio_wait_idle(eqos); 408 if (ret) { 409 pr_err("MDIO not idle at entry"); 410 return ret; 411 } 412 413 val = readl(&eqos->mac_regs->mdio_address); 414 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 415 EQOS_MAC_MDIO_ADDRESS_C45E; 416 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 417 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 418 (eqos->config->config_mac_mdio << 419 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 420 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 421 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 422 EQOS_MAC_MDIO_ADDRESS_GB; 423 writel(val, &eqos->mac_regs->mdio_address); 424 425 udelay(eqos->config->mdio_wait); 426 427 ret = eqos_mdio_wait_idle(eqos); 428 if (ret) { 429 pr_err("MDIO read didn't complete"); 430 return ret; 431 } 432 433 val = readl(&eqos->mac_regs->mdio_data); 434 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 435 436 debug("%s: val=%x\n", __func__, val); 437 438 return val; 439 } 440 441 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 442 int mdio_reg, u16 mdio_val) 443 { 444 struct eqos_priv *eqos = bus->priv; 445 u32 val; 446 int ret; 447 448 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 449 mdio_addr, mdio_reg, mdio_val); 450 451 ret = eqos_mdio_wait_idle(eqos); 452 if (ret) { 453 pr_err("MDIO not idle at entry"); 454 return ret; 455 } 456 457 writel(mdio_val, &eqos->mac_regs->mdio_data); 458 459 val = readl(&eqos->mac_regs->mdio_address); 460 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 461 EQOS_MAC_MDIO_ADDRESS_C45E; 462 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 463 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 464 (eqos->config->config_mac_mdio << 465 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 466 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 467 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 468 EQOS_MAC_MDIO_ADDRESS_GB; 469 writel(val, &eqos->mac_regs->mdio_address); 470 471 udelay(eqos->config->mdio_wait); 472 473 ret = eqos_mdio_wait_idle(eqos); 474 if (ret) { 475 pr_err("MDIO read didn't complete"); 476 return ret; 477 } 478 479 return 0; 480 } 481 482 #ifdef CONFIG_QOS_FULL 483 static int eqos_start_clks_tegra186(struct udevice *dev) 484 { 485 #ifdef CONFIG_CLK 486 struct eqos_priv *eqos = dev_get_priv(dev); 487 int ret; 488 489 debug("%s(dev=%p):\n", __func__, dev); 490 491 ret = clk_enable(&eqos->clk_slave_bus); 492 if (ret < 0) { 493 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 494 goto err; 495 } 496 497 ret = clk_enable(&eqos->clk_master_bus); 498 if (ret < 0) { 499 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 500 goto err_disable_clk_slave_bus; 501 } 502 503 ret = clk_enable(&eqos->clk_rx); 504 if (ret < 0) { 505 pr_err("clk_enable(clk_rx) failed: %d", ret); 506 goto err_disable_clk_master_bus; 507 } 508 509 ret = clk_enable(&eqos->clk_ptp_ref); 510 if (ret < 0) { 511 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 512 goto err_disable_clk_rx; 513 } 514 515 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 516 if (ret < 0) { 517 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 518 goto err_disable_clk_ptp_ref; 519 } 520 521 ret = clk_enable(&eqos->clk_tx); 522 if (ret < 0) { 523 pr_err("clk_enable(clk_tx) failed: %d", ret); 524 goto err_disable_clk_ptp_ref; 525 } 526 #endif 527 528 debug("%s: OK\n", __func__); 529 return 0; 530 531 #ifdef CONFIG_CLK 532 err_disable_clk_ptp_ref: 533 clk_disable(&eqos->clk_ptp_ref); 534 err_disable_clk_rx: 535 clk_disable(&eqos->clk_rx); 536 err_disable_clk_master_bus: 537 clk_disable(&eqos->clk_master_bus); 538 err_disable_clk_slave_bus: 539 clk_disable(&eqos->clk_slave_bus); 540 err: 541 debug("%s: FAILED: %d\n", __func__, ret); 542 return ret; 543 #endif 544 } 545 546 static int eqos_start_clks_stm32(struct udevice *dev) 547 { 548 #ifdef CONFIG_CLK 549 struct eqos_priv *eqos = dev_get_priv(dev); 550 int ret; 551 552 debug("%s(dev=%p):\n", __func__, dev); 553 554 ret = clk_enable(&eqos->clk_master_bus); 555 if (ret < 0) { 556 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 557 goto err; 558 } 559 560 if (clk_valid(&eqos->clk_rx)) { 561 ret = clk_enable(&eqos->clk_rx); 562 if (ret < 0) { 563 pr_err("clk_enable(clk_rx) failed: %d", ret); 564 goto err_disable_clk_master_bus; 565 } 566 } 567 568 if (clk_valid(&eqos->clk_tx)) { 569 ret = clk_enable(&eqos->clk_tx); 570 if (ret < 0) { 571 pr_err("clk_enable(clk_tx) failed: %d", ret); 572 goto err_disable_clk_rx; 573 } 574 } 575 576 if (clk_valid(&eqos->clk_ck)) { 577 ret = clk_enable(&eqos->clk_ck); 578 if (ret < 0) { 579 pr_err("clk_enable(clk_ck) failed: %d", ret); 580 goto err_disable_clk_tx; 581 } 582 } 583 #endif 584 585 debug("%s: OK\n", __func__); 586 return 0; 587 588 #ifdef CONFIG_CLK 589 err_disable_clk_tx: 590 if (clk_valid(&eqos->clk_tx)) 591 clk_disable(&eqos->clk_tx); 592 err_disable_clk_rx: 593 if (clk_valid(&eqos->clk_rx)) 594 clk_disable(&eqos->clk_rx); 595 err_disable_clk_master_bus: 596 clk_disable(&eqos->clk_master_bus); 597 err: 598 debug("%s: FAILED: %d\n", __func__, ret); 599 return ret; 600 #endif 601 } 602 603 static int eqos_start_clks_imx(struct udevice *dev) 604 { 605 return 0; 606 } 607 608 static void eqos_stop_clks_tegra186(struct udevice *dev) 609 { 610 #ifdef CONFIG_CLK 611 struct eqos_priv *eqos = dev_get_priv(dev); 612 613 debug("%s(dev=%p):\n", __func__, dev); 614 615 clk_disable(&eqos->clk_tx); 616 clk_disable(&eqos->clk_ptp_ref); 617 clk_disable(&eqos->clk_rx); 618 clk_disable(&eqos->clk_master_bus); 619 clk_disable(&eqos->clk_slave_bus); 620 #endif 621 622 debug("%s: OK\n", __func__); 623 } 624 625 static void eqos_stop_clks_stm32(struct udevice *dev) 626 { 627 #ifdef CONFIG_CLK 628 struct eqos_priv *eqos = dev_get_priv(dev); 629 630 debug("%s(dev=%p):\n", __func__, dev); 631 632 if (clk_valid(&eqos->clk_tx)) 633 clk_disable(&eqos->clk_tx); 634 if (clk_valid(&eqos->clk_rx)) 635 clk_disable(&eqos->clk_rx); 636 clk_disable(&eqos->clk_master_bus); 637 if (clk_valid(&eqos->clk_ck)) 638 clk_disable(&eqos->clk_ck); 639 #endif 640 641 debug("%s: OK\n", __func__); 642 } 643 644 static void eqos_stop_clks_imx(struct udevice *dev) 645 { 646 /* empty */ 647 } 648 649 static int eqos_start_resets_tegra186(struct udevice *dev) 650 { 651 struct eqos_priv *eqos = dev_get_priv(dev); 652 int ret; 653 654 debug("%s(dev=%p):\n", __func__, dev); 655 656 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 657 if (ret < 0) { 658 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 659 return ret; 660 } 661 662 udelay(2); 663 664 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 665 if (ret < 0) { 666 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 667 return ret; 668 } 669 670 ret = reset_assert(&eqos->reset_ctl); 671 if (ret < 0) { 672 pr_err("reset_assert() failed: %d", ret); 673 return ret; 674 } 675 676 udelay(2); 677 678 ret = reset_deassert(&eqos->reset_ctl); 679 if (ret < 0) { 680 pr_err("reset_deassert() failed: %d", ret); 681 return ret; 682 } 683 684 debug("%s: OK\n", __func__); 685 return 0; 686 } 687 #endif 688 689 static int eqos_start_resets_stm32(struct udevice *dev) 690 { 691 struct eqos_priv *eqos = dev_get_priv(dev); 692 int ret; 693 694 debug("%s(dev=%p):\n", __func__, dev); 695 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 696 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 697 if (ret < 0) { 698 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 699 ret); 700 return ret; 701 } 702 703 udelay(eqos->reset_delays[0]); 704 705 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 706 if (ret < 0) { 707 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 708 ret); 709 return ret; 710 } 711 712 udelay(eqos->reset_delays[1]); 713 714 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 715 if (ret < 0) { 716 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 717 ret); 718 return ret; 719 } 720 721 udelay(eqos->reset_delays[2]); 722 } 723 debug("%s: OK\n", __func__); 724 725 return 0; 726 } 727 728 #ifdef CONFIG_QOS_FULL 729 static int eqos_start_resets_imx(struct udevice *dev) 730 { 731 return 0; 732 } 733 734 static int eqos_stop_resets_tegra186(struct udevice *dev) 735 { 736 struct eqos_priv *eqos = dev_get_priv(dev); 737 738 reset_assert(&eqos->reset_ctl); 739 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 740 741 return 0; 742 } 743 #endif 744 745 static int eqos_stop_resets_stm32(struct udevice *dev) 746 { 747 struct eqos_priv *eqos = dev_get_priv(dev); 748 int ret; 749 750 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 751 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 752 if (ret < 0) { 753 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 754 ret); 755 return ret; 756 } 757 } 758 759 return 0; 760 } 761 762 #ifdef CONFIG_QOS_FULL 763 static int eqos_stop_resets_imx(struct udevice *dev) 764 { 765 return 0; 766 } 767 768 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 769 { 770 struct eqos_priv *eqos = dev_get_priv(dev); 771 int ret; 772 773 debug("%s(dev=%p):\n", __func__, dev); 774 775 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 776 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 777 778 udelay(1); 779 780 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 781 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 782 783 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 784 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 785 if (ret) { 786 pr_err("calibrate didn't start"); 787 goto failed; 788 } 789 790 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 791 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 792 if (ret) { 793 pr_err("calibrate didn't finish"); 794 goto failed; 795 } 796 797 ret = 0; 798 799 failed: 800 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 801 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 802 803 debug("%s: returns %d\n", __func__, ret); 804 805 return ret; 806 } 807 808 static int eqos_disable_calibration_tegra186(struct udevice *dev) 809 { 810 struct eqos_priv *eqos = dev_get_priv(dev); 811 812 debug("%s(dev=%p):\n", __func__, dev); 813 814 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 815 EQOS_AUTO_CAL_CONFIG_ENABLE); 816 817 return 0; 818 } 819 820 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 821 { 822 #ifdef CONFIG_CLK 823 struct eqos_priv *eqos = dev_get_priv(dev); 824 825 return clk_get_rate(&eqos->clk_slave_bus); 826 #else 827 return 0; 828 #endif 829 } 830 #endif 831 832 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 833 { 834 #ifdef CONFIG_CLK 835 struct eqos_priv *eqos = dev_get_priv(dev); 836 837 if (eqos->clk_master_bus.id) 838 return clk_get_rate(&eqos->clk_master_bus); 839 else 840 return 0; 841 #else 842 return 0; 843 #endif 844 } 845 846 #ifdef CONFIG_QOS_FULL 847 __weak u32 imx_get_eqos_csr_clk(void) 848 { 849 return 100 * 1000000; 850 } 851 __weak int imx_eqos_txclk_set_rate(unsigned long rate) 852 { 853 return 0; 854 } 855 856 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 857 { 858 return imx_get_eqos_csr_clk(); 859 } 860 #endif 861 862 static int eqos_calibrate_pads_stm32(struct udevice *dev) 863 { 864 return 0; 865 } 866 867 #ifdef CONFIG_QOS_FULL 868 static int eqos_calibrate_pads_imx(struct udevice *dev) 869 { 870 return 0; 871 } 872 #endif 873 874 static int eqos_disable_calibration_stm32(struct udevice *dev) 875 { 876 return 0; 877 } 878 879 #ifdef CONFIG_QOS_FULL 880 static int eqos_disable_calibration_imx(struct udevice *dev) 881 { 882 return 0; 883 } 884 #endif 885 886 static int eqos_set_full_duplex(struct udevice *dev) 887 { 888 struct eqos_priv *eqos = dev_get_priv(dev); 889 890 debug("%s(dev=%p):\n", __func__, dev); 891 892 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 893 894 return 0; 895 } 896 897 static int eqos_set_half_duplex(struct udevice *dev) 898 { 899 struct eqos_priv *eqos = dev_get_priv(dev); 900 901 debug("%s(dev=%p):\n", __func__, dev); 902 903 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 904 905 /* WAR: Flush TX queue when switching to half-duplex */ 906 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 907 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 908 909 return 0; 910 } 911 912 static int eqos_set_gmii_speed(struct udevice *dev) 913 { 914 struct eqos_priv *eqos = dev_get_priv(dev); 915 916 debug("%s(dev=%p):\n", __func__, dev); 917 918 clrbits_le32(&eqos->mac_regs->configuration, 919 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 920 921 return 0; 922 } 923 924 static int eqos_set_mii_speed_100(struct udevice *dev) 925 { 926 struct eqos_priv *eqos = dev_get_priv(dev); 927 928 debug("%s(dev=%p):\n", __func__, dev); 929 930 setbits_le32(&eqos->mac_regs->configuration, 931 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 932 933 return 0; 934 } 935 936 static int eqos_set_mii_speed_10(struct udevice *dev) 937 { 938 struct eqos_priv *eqos = dev_get_priv(dev); 939 940 debug("%s(dev=%p):\n", __func__, dev); 941 942 clrsetbits_le32(&eqos->mac_regs->configuration, 943 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 944 945 return 0; 946 } 947 948 #ifdef CONFIG_QOS_FULL 949 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 950 { 951 #ifdef CONFIG_CLK 952 struct eqos_priv *eqos = dev_get_priv(dev); 953 ulong rate; 954 int ret; 955 956 debug("%s(dev=%p):\n", __func__, dev); 957 958 switch (eqos->phy->speed) { 959 case SPEED_1000: 960 rate = 125 * 1000 * 1000; 961 break; 962 case SPEED_100: 963 rate = 25 * 1000 * 1000; 964 break; 965 case SPEED_10: 966 rate = 2.5 * 1000 * 1000; 967 break; 968 default: 969 pr_err("invalid speed %d", eqos->phy->speed); 970 return -EINVAL; 971 } 972 973 ret = clk_set_rate(&eqos->clk_tx, rate); 974 if (ret < 0) { 975 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 976 return ret; 977 } 978 #endif 979 980 return 0; 981 } 982 #endif 983 984 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 985 { 986 return 0; 987 } 988 989 #ifdef CONFIG_QOS_FULL 990 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 991 { 992 struct eqos_priv *eqos = dev_get_priv(dev); 993 ulong rate; 994 int ret; 995 996 debug("%s(dev=%p):\n", __func__, dev); 997 998 switch (eqos->phy->speed) { 999 case SPEED_1000: 1000 rate = 125 * 1000 * 1000; 1001 break; 1002 case SPEED_100: 1003 rate = 25 * 1000 * 1000; 1004 break; 1005 case SPEED_10: 1006 rate = 2.5 * 1000 * 1000; 1007 break; 1008 default: 1009 pr_err("invalid speed %d", eqos->phy->speed); 1010 return -EINVAL; 1011 } 1012 1013 ret = imx_eqos_txclk_set_rate(rate); 1014 if (ret < 0) { 1015 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 1016 return ret; 1017 } 1018 1019 return 0; 1020 } 1021 #endif 1022 1023 static int eqos_adjust_link(struct udevice *dev) 1024 { 1025 struct eqos_priv *eqos = dev_get_priv(dev); 1026 int ret; 1027 bool en_calibration; 1028 1029 debug("%s(dev=%p):\n", __func__, dev); 1030 1031 if (eqos->phy->duplex) 1032 ret = eqos_set_full_duplex(dev); 1033 else 1034 ret = eqos_set_half_duplex(dev); 1035 if (ret < 0) { 1036 pr_err("eqos_set_*_duplex() failed: %d", ret); 1037 return ret; 1038 } 1039 1040 switch (eqos->phy->speed) { 1041 case SPEED_1000: 1042 en_calibration = true; 1043 ret = eqos_set_gmii_speed(dev); 1044 break; 1045 case SPEED_100: 1046 en_calibration = true; 1047 ret = eqos_set_mii_speed_100(dev); 1048 break; 1049 case SPEED_10: 1050 en_calibration = false; 1051 ret = eqos_set_mii_speed_10(dev); 1052 break; 1053 default: 1054 pr_err("invalid speed %d", eqos->phy->speed); 1055 return -EINVAL; 1056 } 1057 if (ret < 0) { 1058 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1059 return ret; 1060 } 1061 1062 if (en_calibration) { 1063 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1064 if (ret < 0) { 1065 pr_err("eqos_calibrate_pads() failed: %d", 1066 ret); 1067 return ret; 1068 } 1069 } else { 1070 ret = eqos->config->ops->eqos_disable_calibration(dev); 1071 if (ret < 0) { 1072 pr_err("eqos_disable_calibration() failed: %d", 1073 ret); 1074 return ret; 1075 } 1076 } 1077 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1078 if (ret < 0) { 1079 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1080 return ret; 1081 } 1082 1083 return 0; 1084 } 1085 1086 int eqos_write_hwaddr(struct udevice *dev) 1087 { 1088 struct eth_pdata *plat = dev_get_platdata(dev); 1089 struct eqos_priv *eqos = dev_get_priv(dev); 1090 uint32_t val; 1091 1092 /* 1093 * This function may be called before start() or after stop(). At that 1094 * time, on at least some configurations of the EQoS HW, all clocks to 1095 * the EQoS HW block will be stopped, and a reset signal applied. If 1096 * any register access is attempted in this state, bus timeouts or CPU 1097 * hangs may occur. This check prevents that. 1098 * 1099 * A simple solution to this problem would be to not implement 1100 * write_hwaddr(), since start() always writes the MAC address into HW 1101 * anyway. However, it is desirable to implement write_hwaddr() to 1102 * support the case of SW that runs subsequent to U-Boot which expects 1103 * the MAC address to already be programmed into the EQoS registers, 1104 * which must happen irrespective of whether the U-Boot user (or 1105 * scripts) actually made use of the EQoS device, and hence 1106 * irrespective of whether start() was ever called. 1107 * 1108 * Note that this requirement by subsequent SW is not valid for 1109 * Tegra186, and is likely not valid for any non-PCI instantiation of 1110 * the EQoS HW block. This function is implemented solely as 1111 * future-proofing with the expectation the driver will eventually be 1112 * ported to some system where the expectation above is true. 1113 */ 1114 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1115 return 0; 1116 1117 /* Update the MAC address */ 1118 val = (plat->enetaddr[5] << 8) | 1119 (plat->enetaddr[4]); 1120 writel(val, &eqos->mac_regs->address0_high); 1121 val = (plat->enetaddr[3] << 24) | 1122 (plat->enetaddr[2] << 16) | 1123 (plat->enetaddr[1] << 8) | 1124 (plat->enetaddr[0]); 1125 writel(val, &eqos->mac_regs->address0_low); 1126 1127 return 0; 1128 } 1129 1130 #ifdef CONFIG_QOS_FULL 1131 static int eqos_read_rom_hwaddr(struct udevice *dev) 1132 { 1133 struct eth_pdata *pdata = dev_get_platdata(dev); 1134 1135 #ifdef CONFIG_ARCH_IMX8M 1136 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1137 #endif 1138 return !is_valid_ethaddr(pdata->enetaddr); 1139 } 1140 #endif 1141 1142 int eqos_init(struct udevice *dev) 1143 { 1144 struct eqos_priv *eqos = dev_get_priv(dev); 1145 int ret = 0, limit = 10; 1146 ulong rate; 1147 u32 val; 1148 1149 debug("%s(dev=%p):\n", __func__, dev); 1150 1151 if (eqos->config->ops->eqos_start_clks) { 1152 ret = eqos->config->ops->eqos_start_clks(dev); 1153 if (ret < 0) { 1154 pr_err("eqos_start_clks() failed: %d", ret); 1155 goto err; 1156 } 1157 } 1158 1159 if (!eqos->mii_reseted) { 1160 ret = eqos->config->ops->eqos_start_resets(dev); 1161 if (ret < 0) { 1162 pr_err("eqos_start_resets() failed: %d", ret); 1163 goto err_stop_clks; 1164 } 1165 1166 eqos->mii_reseted = true; 1167 udelay(10); 1168 } 1169 1170 eqos->reg_access_ok = true; 1171 1172 /* DMA SW reset */ 1173 val = readl(&eqos->dma_regs->mode); 1174 val |= EQOS_DMA_MODE_SWR; 1175 writel(val, &eqos->dma_regs->mode); 1176 while (limit--) { 1177 if (!(readl(&eqos->dma_regs->mode) & EQOS_DMA_MODE_SWR)) 1178 break; 1179 mdelay(10); 1180 } 1181 1182 if (limit < 0) { 1183 pr_err("EQOS_DMA_MODE_SWR stuck"); 1184 ret = -EAGAIN; 1185 goto err_stop_resets; 1186 } 1187 1188 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1189 if (ret < 0) { 1190 pr_err("eqos_calibrate_pads() failed: %d", ret); 1191 goto err_stop_resets; 1192 } 1193 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1194 1195 val = (rate / 1000000) - 1; 1196 writel(val, &eqos->mac_regs->us_tic_counter); 1197 1198 /* 1199 * if PHY was already connected and configured, 1200 * don't need to reconnect/reconfigure again 1201 */ 1202 if (!eqos->phy) { 1203 int addr = -1; 1204 #ifdef CONFIG_DM_ETH_PHY 1205 addr = eth_phy_get_addr(dev); 1206 #endif 1207 #ifdef DWC_NET_PHYADDR 1208 addr = DWC_NET_PHYADDR; 1209 #endif 1210 eqos->phy = phy_connect(eqos->mii, addr, dev, 1211 eqos->config->ops->eqos_get_interface(dev)); 1212 if (!eqos->phy) { 1213 pr_err("phy_connect() failed"); 1214 ret = -ENODEV; 1215 goto err_stop_resets; 1216 } 1217 1218 if (eqos->max_speed) { 1219 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1220 if (ret) { 1221 pr_err("phy_set_supported() failed: %d", ret); 1222 goto err_shutdown_phy; 1223 } 1224 } 1225 1226 ret = phy_config(eqos->phy); 1227 if (ret < 0) { 1228 pr_err("phy_config() failed: %d", ret); 1229 goto err_shutdown_phy; 1230 } 1231 } 1232 1233 ret = phy_startup(eqos->phy); 1234 if (ret < 0) { 1235 pr_err("phy_startup() failed: %d", ret); 1236 goto err_shutdown_phy; 1237 } 1238 1239 if (!eqos->phy->link) { 1240 pr_err("No link"); 1241 ret = -EINVAL; 1242 goto err_shutdown_phy; 1243 } 1244 1245 ret = eqos_adjust_link(dev); 1246 if (ret < 0) { 1247 pr_err("eqos_adjust_link() failed: %d", ret); 1248 goto err_shutdown_phy; 1249 } 1250 1251 debug("%s: OK\n", __func__); 1252 return 0; 1253 1254 err_shutdown_phy: 1255 phy_shutdown(eqos->phy); 1256 err_stop_resets: 1257 eqos->config->ops->eqos_stop_resets(dev); 1258 eqos->mii_reseted = false; 1259 err_stop_clks: 1260 if (eqos->config->ops->eqos_stop_clks) 1261 eqos->config->ops->eqos_stop_clks(dev); 1262 err: 1263 pr_err("FAILED: %d", ret); 1264 return ret; 1265 } 1266 1267 void eqos_enable(struct udevice *dev) 1268 { 1269 struct eqos_priv *eqos = dev_get_priv(dev); 1270 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1271 ulong last_rx_desc; 1272 int i; 1273 1274 eqos->tx_desc_idx = 0; 1275 eqos->rx_desc_idx = 0; 1276 1277 /* Configure MTL */ 1278 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1279 1280 /* Enable Store and Forward mode for TX */ 1281 /* Program Tx operating mode */ 1282 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1283 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1284 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1285 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1286 1287 /* Transmit Queue weight */ 1288 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1289 1290 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1291 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1292 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1293 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1294 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1295 1296 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1297 val = readl(&eqos->mac_regs->hw_feature1); 1298 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1299 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1300 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1301 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1302 1303 /* 1304 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1305 * r/tqs is encoded as (n / 256) - 1. 1306 */ 1307 tqs = (128 << tx_fifo_sz) / 256 - 1; 1308 rqs = (128 << rx_fifo_sz) / 256 - 1; 1309 1310 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1311 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1312 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1313 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1314 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1315 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1316 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1317 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1318 1319 /* Flow control used only if each channel gets 4KB or more FIFO */ 1320 if (rqs >= ((4096 / 256) - 1)) { 1321 u32 rfd, rfa; 1322 1323 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1324 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1325 1326 /* 1327 * Set Threshold for Activating Flow Contol space for min 2 1328 * frames ie, (1500 * 1) = 1500 bytes. 1329 * 1330 * Set Threshold for Deactivating Flow Contol for space of 1331 * min 1 frame (frame size 1500bytes) in receive fifo 1332 */ 1333 if (rqs == ((4096 / 256) - 1)) { 1334 /* 1335 * This violates the above formula because of FIFO size 1336 * limit therefore overflow may occur inspite of this. 1337 */ 1338 rfd = 0x3; /* Full-3K */ 1339 rfa = 0x1; /* Full-1.5K */ 1340 } else if (rqs == ((8192 / 256) - 1)) { 1341 rfd = 0x6; /* Full-4K */ 1342 rfa = 0xa; /* Full-6K */ 1343 } else if (rqs == ((16384 / 256) - 1)) { 1344 rfd = 0x6; /* Full-4K */ 1345 rfa = 0x12; /* Full-10K */ 1346 } else { 1347 rfd = 0x6; /* Full-4K */ 1348 rfa = 0x1E; /* Full-16K */ 1349 } 1350 1351 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1352 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1353 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1354 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1355 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1356 (rfd << 1357 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1358 (rfa << 1359 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1360 } 1361 1362 /* Configure MAC */ 1363 1364 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1365 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1366 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1367 eqos->config->config_mac << 1368 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1369 1370 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1371 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1372 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1373 0x2 << 1374 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1375 1376 /* Multicast and Broadcast Queue Enable */ 1377 setbits_le32(&eqos->mac_regs->unused_0a4, 1378 0x00100000); 1379 /* enable promise mode */ 1380 setbits_le32(&eqos->mac_regs->unused_004[1], 1381 0x1); 1382 1383 /* Set TX flow control parameters */ 1384 /* Set Pause Time */ 1385 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1386 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1387 /* Assign priority for TX flow control */ 1388 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1389 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1390 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1391 /* Assign priority for RX flow control */ 1392 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1393 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1394 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1395 /* Enable flow control */ 1396 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1397 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1398 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1399 EQOS_MAC_RX_FLOW_CTRL_RFE); 1400 1401 clrsetbits_le32(&eqos->mac_regs->configuration, 1402 EQOS_MAC_CONFIGURATION_GPSLCE | 1403 EQOS_MAC_CONFIGURATION_WD | 1404 EQOS_MAC_CONFIGURATION_JD | 1405 EQOS_MAC_CONFIGURATION_JE, 1406 EQOS_MAC_CONFIGURATION_CST | 1407 EQOS_MAC_CONFIGURATION_ACS); 1408 1409 eqos_write_hwaddr(dev); 1410 1411 /* Configure DMA */ 1412 1413 /* Enable OSP mode */ 1414 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1415 EQOS_DMA_CH0_TX_CONTROL_OSP); 1416 1417 /* RX buffer size. Must be a multiple of bus width */ 1418 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1419 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1420 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1421 EQOS_MAX_PACKET_SIZE << 1422 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1423 1424 setbits_le32(&eqos->dma_regs->ch0_control, 1425 EQOS_DMA_CH0_CONTROL_PBLX8); 1426 1427 /* 1428 * Burst length must be < 1/2 FIFO size. 1429 * FIFO size in tqs is encoded as (n / 256) - 1. 1430 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1431 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 1432 */ 1433 pbl = tqs + 1; 1434 if (pbl > 32) 1435 pbl = 32; 1436 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1437 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1438 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1439 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1440 1441 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1442 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1443 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1444 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1445 1446 /* DMA performance configuration */ 1447 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1448 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1449 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1450 writel(val, &eqos->dma_regs->sysbus_mode); 1451 1452 /* Set up descriptors */ 1453 1454 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1455 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1456 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1457 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1458 (i * EQOS_MAX_PACKET_SIZE)); 1459 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1460 mb(); 1461 eqos->config->ops->eqos_flush_desc(rx_desc); 1462 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1463 (i * EQOS_MAX_PACKET_SIZE), 1464 EQOS_MAX_PACKET_SIZE); 1465 } 1466 1467 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1468 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1469 writel(EQOS_DESCRIPTORS_TX - 1, 1470 &eqos->dma_regs->ch0_txdesc_ring_length); 1471 1472 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1473 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1474 writel(EQOS_DESCRIPTORS_RX - 1, 1475 &eqos->dma_regs->ch0_rxdesc_ring_length); 1476 1477 /* Enable everything */ 1478 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1479 EQOS_DMA_CH0_TX_CONTROL_ST); 1480 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1481 EQOS_DMA_CH0_RX_CONTROL_SR); 1482 setbits_le32(&eqos->mac_regs->configuration, 1483 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1484 1485 /* TX tail pointer not written until we need to TX a packet */ 1486 /* 1487 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1488 * first descriptor, implying all descriptors were available. However, 1489 * that's not distinguishable from none of the descriptors being 1490 * available. 1491 */ 1492 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1493 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1494 1495 eqos->started = true; 1496 } 1497 1498 static int __maybe_unused eqos_start(struct udevice *dev) 1499 { 1500 int ret; 1501 1502 ret = eqos_init(dev); 1503 if (ret) 1504 return ret; 1505 1506 eqos_enable(dev); 1507 1508 return 0; 1509 } 1510 1511 void eqos_stop(struct udevice *dev) 1512 { 1513 struct eqos_priv *eqos = dev_get_priv(dev); 1514 int i; 1515 1516 debug("%s(dev=%p):\n", __func__, dev); 1517 1518 if (!eqos->started) 1519 return; 1520 eqos->started = false; 1521 eqos->reg_access_ok = false; 1522 1523 /* Disable TX DMA */ 1524 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1525 EQOS_DMA_CH0_TX_CONTROL_ST); 1526 1527 /* Wait for TX all packets to drain out of MTL */ 1528 for (i = 0; i < 1000000; i++) { 1529 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1530 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1531 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1532 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1533 if ((trcsts != 1) && (!txqsts)) 1534 break; 1535 } 1536 1537 /* Turn off MAC TX and RX */ 1538 clrbits_le32(&eqos->mac_regs->configuration, 1539 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1540 1541 /* Wait for all RX packets to drain out of MTL */ 1542 for (i = 0; i < 1000000; i++) { 1543 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1544 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1545 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1546 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1547 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1548 if ((!prxq) && (!rxqsts)) 1549 break; 1550 } 1551 1552 /* Turn off RX DMA */ 1553 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1554 EQOS_DMA_CH0_RX_CONTROL_SR); 1555 1556 if (eqos->phy) { 1557 phy_shutdown(eqos->phy); 1558 } 1559 if (eqos->config->ops->eqos_stop_clks) 1560 eqos->config->ops->eqos_stop_clks(dev); 1561 1562 debug("%s: OK\n", __func__); 1563 } 1564 1565 int eqos_send(struct udevice *dev, void *packet, int length) 1566 { 1567 struct eqos_priv *eqos = dev_get_priv(dev); 1568 struct eqos_desc *tx_desc; 1569 int i; 1570 1571 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1572 length); 1573 1574 memcpy(eqos->tx_dma_buf, packet, length); 1575 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1576 1577 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1578 eqos->tx_desc_idx++; 1579 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1580 1581 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1582 tx_desc->des1 = 0; 1583 tx_desc->des2 = length; 1584 /* 1585 * Make sure that if HW sees the _OWN write below, it will see all the 1586 * writes to the rest of the descriptor too. 1587 */ 1588 mb(); 1589 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1590 eqos->config->ops->eqos_flush_desc(tx_desc); 1591 1592 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1593 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1594 1595 for (i = 0; i < 1000000; i++) { 1596 eqos->config->ops->eqos_inval_desc(tx_desc); 1597 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1598 return 0; 1599 udelay(1); 1600 } 1601 1602 debug("%s: TX timeout\n", __func__); 1603 1604 return -ETIMEDOUT; 1605 } 1606 1607 int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1608 { 1609 struct eqos_priv *eqos = dev_get_priv(dev); 1610 struct eqos_desc *rx_desc; 1611 int length; 1612 1613 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1614 1615 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1616 eqos->config->ops->eqos_inval_desc(rx_desc); 1617 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1618 debug("%s: RX packet not available\n", __func__); 1619 return -EAGAIN; 1620 } 1621 1622 *packetp = eqos->rx_dma_buf + 1623 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1624 length = rx_desc->des3 & 0x7fff; 1625 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1626 1627 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1628 1629 return length; 1630 } 1631 1632 int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1633 { 1634 struct eqos_priv *eqos = dev_get_priv(dev); 1635 uchar *packet_expected; 1636 struct eqos_desc *rx_desc; 1637 1638 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1639 1640 packet_expected = eqos->rx_dma_buf + 1641 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1642 if (packet != packet_expected) { 1643 debug("%s: Unexpected packet (expected %p)\n", __func__, 1644 packet_expected); 1645 return -EINVAL; 1646 } 1647 1648 eqos->config->ops->eqos_inval_buffer(packet, length); 1649 1650 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1651 1652 rx_desc->des0 = 0; 1653 mb(); 1654 eqos->config->ops->eqos_flush_desc(rx_desc); 1655 eqos->config->ops->eqos_inval_buffer(packet, length); 1656 rx_desc->des0 = (u32)(ulong)packet; 1657 rx_desc->des1 = 0; 1658 rx_desc->des2 = 0; 1659 /* 1660 * Make sure that if HW sees the _OWN write below, it will see all the 1661 * writes to the rest of the descriptor too. 1662 */ 1663 mb(); 1664 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1665 eqos->config->ops->eqos_flush_desc(rx_desc); 1666 1667 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1668 1669 eqos->rx_desc_idx++; 1670 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1671 1672 return 0; 1673 } 1674 1675 static int eqos_probe_resources_core(struct udevice *dev) 1676 { 1677 struct eqos_priv *eqos = dev_get_priv(dev); 1678 int ret; 1679 1680 debug("%s(dev=%p):\n", __func__, dev); 1681 1682 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1683 EQOS_DESCRIPTORS_RX); 1684 if (!eqos->descs) { 1685 debug("%s: eqos_alloc_descs() failed\n", __func__); 1686 ret = -ENOMEM; 1687 goto err; 1688 } 1689 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1690 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1691 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1692 eqos->rx_descs); 1693 1694 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1695 if (!eqos->tx_dma_buf) { 1696 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1697 ret = -ENOMEM; 1698 goto err_free_descs; 1699 } 1700 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1701 1702 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1703 if (!eqos->rx_dma_buf) { 1704 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1705 ret = -ENOMEM; 1706 goto err_free_tx_dma_buf; 1707 } 1708 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1709 1710 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1711 if (!eqos->rx_pkt) { 1712 debug("%s: malloc(rx_pkt) failed\n", __func__); 1713 ret = -ENOMEM; 1714 goto err_free_rx_dma_buf; 1715 } 1716 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1717 1718 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1719 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1720 1721 debug("%s: OK\n", __func__); 1722 return 0; 1723 1724 err_free_rx_dma_buf: 1725 free(eqos->rx_dma_buf); 1726 err_free_tx_dma_buf: 1727 free(eqos->tx_dma_buf); 1728 err_free_descs: 1729 eqos_free_descs(eqos->descs); 1730 err: 1731 1732 debug("%s: returns %d\n", __func__, ret); 1733 return ret; 1734 } 1735 1736 static int eqos_remove_resources_core(struct udevice *dev) 1737 { 1738 struct eqos_priv *eqos = dev_get_priv(dev); 1739 1740 debug("%s(dev=%p):\n", __func__, dev); 1741 1742 free(eqos->rx_pkt); 1743 free(eqos->rx_dma_buf); 1744 free(eqos->tx_dma_buf); 1745 eqos_free_descs(eqos->descs); 1746 1747 debug("%s: OK\n", __func__); 1748 return 0; 1749 } 1750 1751 #ifdef CONFIG_QOS_FULL 1752 static int eqos_probe_resources_tegra186(struct udevice *dev) 1753 { 1754 struct eqos_priv *eqos = dev_get_priv(dev); 1755 int ret; 1756 1757 debug("%s(dev=%p):\n", __func__, dev); 1758 1759 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1760 if (ret) { 1761 pr_err("reset_get_by_name(rst) failed: %d", ret); 1762 return ret; 1763 } 1764 1765 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1766 &eqos->phy_reset_gpio, 1767 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1768 if (ret) { 1769 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1770 goto err_free_reset_eqos; 1771 } 1772 1773 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1774 if (ret) { 1775 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1776 goto err_free_gpio_phy_reset; 1777 } 1778 1779 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1780 if (ret) { 1781 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1782 goto err_free_clk_slave_bus; 1783 } 1784 1785 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1786 if (ret) { 1787 pr_err("clk_get_by_name(rx) failed: %d", ret); 1788 goto err_free_clk_master_bus; 1789 } 1790 1791 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1792 if (ret) { 1793 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1794 goto err_free_clk_rx; 1795 return ret; 1796 } 1797 1798 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1799 if (ret) { 1800 pr_err("clk_get_by_name(tx) failed: %d", ret); 1801 goto err_free_clk_ptp_ref; 1802 } 1803 1804 debug("%s: OK\n", __func__); 1805 return 0; 1806 1807 err_free_clk_ptp_ref: 1808 clk_free(&eqos->clk_ptp_ref); 1809 err_free_clk_rx: 1810 clk_free(&eqos->clk_rx); 1811 err_free_clk_master_bus: 1812 clk_free(&eqos->clk_master_bus); 1813 err_free_clk_slave_bus: 1814 clk_free(&eqos->clk_slave_bus); 1815 err_free_gpio_phy_reset: 1816 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1817 err_free_reset_eqos: 1818 reset_free(&eqos->reset_ctl); 1819 1820 debug("%s: returns %d\n", __func__, ret); 1821 return ret; 1822 } 1823 #endif 1824 1825 /* board-specific Ethernet Interface initializations. */ 1826 __weak int board_interface_eth_init(struct udevice *dev, 1827 phy_interface_t interface_type) 1828 { 1829 return 0; 1830 } 1831 1832 static int eqos_probe_resources_stm32(struct udevice *dev) 1833 { 1834 struct eqos_priv *eqos = dev_get_priv(dev); 1835 int ret; 1836 phy_interface_t interface; 1837 struct ofnode_phandle_args phandle_args; 1838 1839 debug("%s(dev=%p):\n", __func__, dev); 1840 1841 interface = eqos->config->ops->eqos_get_interface(dev); 1842 1843 if (interface == PHY_INTERFACE_MODE_NONE) { 1844 pr_err("Invalid PHY interface\n"); 1845 return -EINVAL; 1846 } 1847 1848 ret = board_interface_eth_init(dev, interface); 1849 if (ret) 1850 return -EINVAL; 1851 1852 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1853 1854 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1855 if (ret) 1856 pr_err("clk_get_by_name(master_bus) failed: %d\n", ret); 1857 1858 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1859 if (ret) 1860 pr_warn("clk_get_by_name(rx) failed: %d", ret); 1861 1862 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1863 if (ret) 1864 pr_warn("clk_get_by_name(tx) failed: %d", ret); 1865 1866 /* Get ETH_CLK clocks (optional) */ 1867 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1868 if (ret) 1869 pr_warn("No phy clock provided %d", ret); 1870 1871 eqos->phyaddr = -1; 1872 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1873 &phandle_args); 1874 if (!ret) { 1875 /* search "reset-gpios" in phy node */ 1876 ret = gpio_request_by_name_nodev(phandle_args.node, 1877 "reset-gpios", 0, 1878 &eqos->phy_reset_gpio, 1879 GPIOD_IS_OUT | 1880 GPIOD_IS_OUT_ACTIVE); 1881 if (ret) 1882 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1883 ret); 1884 else 1885 eqos->reset_delays[1] = 2; 1886 1887 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1888 "reg", -1); 1889 } 1890 1891 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1892 int reset_flags = GPIOD_IS_OUT; 1893 1894 if (dev_read_bool(dev, "snps,reset-active-low")) 1895 reset_flags |= GPIOD_ACTIVE_LOW; 1896 1897 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1898 &eqos->phy_reset_gpio, reset_flags); 1899 if (ret == 0) 1900 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1901 eqos->reset_delays, 3); 1902 else 1903 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1904 ret); 1905 } 1906 1907 debug("%s: OK\n", __func__); 1908 return 0; 1909 } 1910 1911 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1912 { 1913 const char *phy_mode; 1914 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1915 1916 debug("%s(dev=%p):\n", __func__, dev); 1917 1918 phy_mode = dev_read_string(dev, "phy-mode"); 1919 if (phy_mode) 1920 interface = phy_get_interface_by_name(phy_mode); 1921 1922 return interface; 1923 } 1924 1925 #ifdef CONFIG_QOS_FULL 1926 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1927 { 1928 return PHY_INTERFACE_MODE_MII; 1929 } 1930 1931 static int eqos_probe_resources_imx(struct udevice *dev) 1932 { 1933 struct eqos_priv *eqos = dev_get_priv(dev); 1934 phy_interface_t interface; 1935 1936 debug("%s(dev=%p):\n", __func__, dev); 1937 1938 interface = eqos->config->ops->eqos_get_interface(dev); 1939 1940 if (interface == PHY_INTERFACE_MODE_NONE) { 1941 pr_err("Invalid PHY interface\n"); 1942 return -EINVAL; 1943 } 1944 1945 debug("%s: OK\n", __func__); 1946 return 0; 1947 } 1948 1949 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1950 { 1951 const char *phy_mode; 1952 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1953 1954 debug("%s(dev=%p):\n", __func__, dev); 1955 1956 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1957 NULL); 1958 if (phy_mode) 1959 interface = phy_get_interface_by_name(phy_mode); 1960 1961 return interface; 1962 } 1963 1964 static int eqos_remove_resources_tegra186(struct udevice *dev) 1965 { 1966 struct eqos_priv *eqos = dev_get_priv(dev); 1967 1968 debug("%s(dev=%p):\n", __func__, dev); 1969 1970 #ifdef CONFIG_CLK 1971 clk_free(&eqos->clk_tx); 1972 clk_free(&eqos->clk_ptp_ref); 1973 clk_free(&eqos->clk_rx); 1974 clk_free(&eqos->clk_slave_bus); 1975 clk_free(&eqos->clk_master_bus); 1976 #endif 1977 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1978 reset_free(&eqos->reset_ctl); 1979 1980 debug("%s: OK\n", __func__); 1981 return 0; 1982 } 1983 #endif 1984 1985 static int eqos_remove_resources_stm32(struct udevice *dev) 1986 { 1987 #ifdef CONFIG_CLK 1988 struct eqos_priv *eqos = dev_get_priv(dev); 1989 1990 debug("%s(dev=%p):\n", __func__, dev); 1991 1992 if (clk_valid(&eqos->clk_tx)) 1993 clk_free(&eqos->clk_tx); 1994 if (clk_valid(&eqos->clk_rx)) 1995 clk_free(&eqos->clk_rx); 1996 clk_free(&eqos->clk_master_bus); 1997 if (clk_valid(&eqos->clk_ck)) 1998 clk_free(&eqos->clk_ck); 1999 #endif 2000 2001 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 2002 dm_gpio_free(dev, &eqos->phy_reset_gpio); 2003 2004 debug("%s: OK\n", __func__); 2005 return 0; 2006 } 2007 2008 #ifdef CONFIG_QOS_FULL 2009 static int eqos_remove_resources_imx(struct udevice *dev) 2010 { 2011 return 0; 2012 } 2013 #endif 2014 2015 int eqos_probe(struct udevice *dev) 2016 { 2017 struct eqos_priv *eqos = dev_get_priv(dev); 2018 int ret; 2019 2020 debug("%s(dev=%p):\n", __func__, dev); 2021 2022 eqos->dev = dev; 2023 eqos->config = (void *)dev_get_driver_data(dev); 2024 2025 eqos->regs = dev_read_addr(dev); 2026 if (eqos->regs == FDT_ADDR_T_NONE) { 2027 pr_err("dev_read_addr() failed"); 2028 return -ENODEV; 2029 } 2030 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 2031 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 2032 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 2033 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 2034 2035 ret = eqos_probe_resources_core(dev); 2036 if (ret < 0) { 2037 pr_err("eqos_probe_resources_core() failed: %d", ret); 2038 return ret; 2039 } 2040 2041 ret = eqos->config->ops->eqos_probe_resources(dev); 2042 if (ret < 0) { 2043 pr_err("eqos_probe_resources() failed: %d", ret); 2044 goto err_remove_resources_core; 2045 } 2046 2047 #ifdef CONFIG_DM_ETH_PHY 2048 eqos->mii = eth_phy_get_mdio_bus(dev); 2049 #endif 2050 if (!eqos->mii) { 2051 eqos->mii = mdio_alloc(); 2052 if (!eqos->mii) { 2053 pr_err("mdio_alloc() failed"); 2054 ret = -ENOMEM; 2055 goto err_remove_resources_tegra; 2056 } 2057 eqos->mii->read = eqos_mdio_read; 2058 eqos->mii->write = eqos_mdio_write; 2059 eqos->mii->priv = eqos; 2060 strcpy(eqos->mii->name, dev->name); 2061 2062 ret = mdio_register(eqos->mii); 2063 if (ret < 0) { 2064 pr_err("mdio_register() failed: %d", ret); 2065 goto err_free_mdio; 2066 } 2067 } 2068 2069 #ifdef CONFIG_DM_ETH_PHY 2070 eth_phy_set_mdio_bus(dev, eqos->mii); 2071 #endif 2072 2073 debug("%s: OK\n", __func__); 2074 return 0; 2075 2076 err_free_mdio: 2077 mdio_free(eqos->mii); 2078 err_remove_resources_tegra: 2079 eqos->config->ops->eqos_remove_resources(dev); 2080 err_remove_resources_core: 2081 eqos_remove_resources_core(dev); 2082 2083 debug("%s: returns %d\n", __func__, ret); 2084 return ret; 2085 } 2086 2087 static int __maybe_unused eqos_remove(struct udevice *dev) 2088 { 2089 struct eqos_priv *eqos = dev_get_priv(dev); 2090 2091 debug("%s(dev=%p):\n", __func__, dev); 2092 2093 mdio_unregister(eqos->mii); 2094 mdio_free(eqos->mii); 2095 eqos->config->ops->eqos_remove_resources(dev); 2096 2097 eqos_probe_resources_core(dev); 2098 2099 debug("%s: OK\n", __func__); 2100 return 0; 2101 } 2102 2103 #ifdef CONFIG_QOS_FULL 2104 static const struct eth_ops eqos_ops = { 2105 .start = eqos_start, 2106 .stop = eqos_stop, 2107 .send = eqos_send, 2108 .recv = eqos_recv, 2109 .free_pkt = eqos_free_pkt, 2110 .write_hwaddr = eqos_write_hwaddr, 2111 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2112 }; 2113 2114 static struct eqos_ops eqos_tegra186_ops = { 2115 .eqos_inval_desc = eqos_inval_desc_tegra186, 2116 .eqos_flush_desc = eqos_flush_desc_tegra186, 2117 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2118 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2119 .eqos_probe_resources = eqos_probe_resources_tegra186, 2120 .eqos_remove_resources = eqos_remove_resources_tegra186, 2121 .eqos_stop_resets = eqos_stop_resets_tegra186, 2122 .eqos_start_resets = eqos_start_resets_tegra186, 2123 .eqos_stop_clks = eqos_stop_clks_tegra186, 2124 .eqos_start_clks = eqos_start_clks_tegra186, 2125 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2126 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2127 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2128 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2129 .eqos_get_interface = eqos_get_interface_tegra186 2130 }; 2131 2132 static const struct eqos_config eqos_tegra186_config = { 2133 .reg_access_always_ok = false, 2134 .mdio_wait = 10, 2135 .swr_wait = 10, 2136 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2137 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2138 .ops = &eqos_tegra186_ops 2139 }; 2140 2141 static struct eqos_ops eqos_stm32_ops = { 2142 .eqos_inval_desc = eqos_inval_desc_generic, 2143 .eqos_flush_desc = eqos_flush_desc_generic, 2144 .eqos_inval_buffer = eqos_inval_buffer_generic, 2145 .eqos_flush_buffer = eqos_flush_buffer_generic, 2146 .eqos_probe_resources = eqos_probe_resources_stm32, 2147 .eqos_remove_resources = eqos_remove_resources_stm32, 2148 .eqos_stop_resets = eqos_stop_resets_stm32, 2149 .eqos_start_resets = eqos_start_resets_stm32, 2150 .eqos_stop_clks = eqos_stop_clks_stm32, 2151 .eqos_start_clks = eqos_start_clks_stm32, 2152 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2153 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2154 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2155 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2156 .eqos_get_interface = eqos_get_interface_stm32 2157 }; 2158 2159 static const struct eqos_config eqos_stm32_config = { 2160 .reg_access_always_ok = false, 2161 .mdio_wait = 10000, 2162 .swr_wait = 50, 2163 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2164 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2165 .ops = &eqos_stm32_ops 2166 }; 2167 2168 static struct eqos_ops eqos_imx_ops = { 2169 .eqos_inval_desc = eqos_inval_desc_generic, 2170 .eqos_flush_desc = eqos_flush_desc_generic, 2171 .eqos_inval_buffer = eqos_inval_buffer_generic, 2172 .eqos_flush_buffer = eqos_flush_buffer_generic, 2173 .eqos_probe_resources = eqos_probe_resources_imx, 2174 .eqos_remove_resources = eqos_remove_resources_imx, 2175 .eqos_stop_resets = eqos_stop_resets_imx, 2176 .eqos_start_resets = eqos_start_resets_imx, 2177 .eqos_stop_clks = eqos_stop_clks_imx, 2178 .eqos_start_clks = eqos_start_clks_imx, 2179 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2180 .eqos_disable_calibration = eqos_disable_calibration_imx, 2181 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2182 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, 2183 .eqos_get_interface = eqos_get_interface_imx 2184 }; 2185 2186 struct eqos_config eqos_imx_config = { 2187 .reg_access_always_ok = false, 2188 .mdio_wait = 10000, 2189 .swr_wait = 50, 2190 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2191 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2192 .ops = &eqos_imx_ops 2193 }; 2194 #endif 2195 2196 struct eqos_ops eqos_rockchip_ops = { 2197 .eqos_inval_desc = eqos_inval_desc_generic, 2198 .eqos_flush_desc = eqos_flush_desc_generic, 2199 .eqos_inval_buffer = eqos_inval_buffer_generic, 2200 .eqos_flush_buffer = eqos_flush_buffer_generic, 2201 .eqos_probe_resources = eqos_probe_resources_stm32, 2202 .eqos_remove_resources = eqos_remove_resources_stm32, 2203 .eqos_stop_resets = eqos_stop_resets_stm32, 2204 .eqos_start_resets = eqos_start_resets_stm32, 2205 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2206 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2207 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2208 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2209 .eqos_get_interface = eqos_get_interface_stm32 2210 }; 2211 2212 #ifdef CONFIG_QOS_FULL 2213 static const struct udevice_id eqos_ids[] = { 2214 { 2215 .compatible = "nvidia,tegra186-eqos", 2216 .data = (ulong)&eqos_tegra186_config 2217 }, 2218 { 2219 .compatible = "snps,dwmac-4.20a", 2220 .data = (ulong)&eqos_stm32_config 2221 }, 2222 { 2223 .compatible = "fsl,imx-eqos", 2224 .data = (ulong)&eqos_imx_config 2225 }, 2226 2227 { } 2228 }; 2229 2230 U_BOOT_DRIVER(eth_eqos) = { 2231 .name = "eth_eqos", 2232 .id = UCLASS_ETH, 2233 .of_match = of_match_ptr(eqos_ids), 2234 .probe = eqos_probe, 2235 .remove = eqos_remove, 2236 .ops = &eqos_ops, 2237 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2238 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2239 }; 2240 #endif 2241