1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 45 /* Core registers */ 46 47 #define EQOS_MAC_REGS_BASE 0x000 48 struct eqos_mac_regs { 49 uint32_t configuration; /* 0x000 */ 50 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 51 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 52 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 53 uint32_t rx_flow_ctrl; /* 0x090 */ 54 uint32_t unused_094; /* 0x094 */ 55 uint32_t txq_prty_map0; /* 0x098 */ 56 uint32_t unused_09c; /* 0x09c */ 57 uint32_t rxq_ctrl0; /* 0x0a0 */ 58 uint32_t unused_0a4; /* 0x0a4 */ 59 uint32_t rxq_ctrl2; /* 0x0a8 */ 60 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 61 uint32_t us_tic_counter; /* 0x0dc */ 62 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 63 uint32_t hw_feature0; /* 0x11c */ 64 uint32_t hw_feature1; /* 0x120 */ 65 uint32_t hw_feature2; /* 0x124 */ 66 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 67 uint32_t mdio_address; /* 0x200 */ 68 uint32_t mdio_data; /* 0x204 */ 69 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 70 uint32_t address0_high; /* 0x300 */ 71 uint32_t address0_low; /* 0x304 */ 72 }; 73 74 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 75 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 76 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 77 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 78 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 79 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 80 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 81 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 82 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 83 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 84 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 85 86 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 87 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 88 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 89 90 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 91 92 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 93 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 94 95 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 96 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 97 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 98 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 99 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 100 101 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 102 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 103 104 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 105 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 106 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 107 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 108 109 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 110 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 111 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 112 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 113 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 114 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 115 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 116 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 117 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 118 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 119 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 120 121 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 122 123 #define EQOS_MTL_REGS_BASE 0xd00 124 struct eqos_mtl_regs { 125 uint32_t txq0_operation_mode; /* 0xd00 */ 126 uint32_t unused_d04; /* 0xd04 */ 127 uint32_t txq0_debug; /* 0xd08 */ 128 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 129 uint32_t txq0_quantum_weight; /* 0xd18 */ 130 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 131 uint32_t rxq0_operation_mode; /* 0xd30 */ 132 uint32_t unused_d34; /* 0xd34 */ 133 uint32_t rxq0_debug; /* 0xd38 */ 134 }; 135 136 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 137 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 138 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 139 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 140 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 141 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 142 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 143 144 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 145 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 146 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 147 148 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 149 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 150 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 151 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 152 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 153 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 154 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 155 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 156 157 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 158 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 159 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 160 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 161 162 #define EQOS_DMA_REGS_BASE 0x1000 163 struct eqos_dma_regs { 164 uint32_t mode; /* 0x1000 */ 165 uint32_t sysbus_mode; /* 0x1004 */ 166 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 167 uint32_t ch0_control; /* 0x1100 */ 168 uint32_t ch0_tx_control; /* 0x1104 */ 169 uint32_t ch0_rx_control; /* 0x1108 */ 170 uint32_t unused_110c; /* 0x110c */ 171 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 172 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 173 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 174 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 175 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 176 uint32_t unused_1124; /* 0x1124 */ 177 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 178 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 179 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 180 }; 181 182 #define EQOS_DMA_MODE_SWR BIT(0) 183 184 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 185 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 186 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 187 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 188 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 189 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 190 191 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 192 193 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 194 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 195 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 196 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 197 198 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 199 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 200 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 201 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 202 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 203 204 /* These registers are Tegra186-specific */ 205 #define EQOS_TEGRA186_REGS_BASE 0x8800 206 struct eqos_tegra186_regs { 207 uint32_t sdmemcomppadctrl; /* 0x8800 */ 208 uint32_t auto_cal_config; /* 0x8804 */ 209 uint32_t unused_8808; /* 0x8808 */ 210 uint32_t auto_cal_status; /* 0x880c */ 211 }; 212 213 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 214 215 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 216 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 217 218 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 219 220 /* Descriptors */ 221 222 #define EQOS_DESCRIPTOR_WORDS 4 223 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 224 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 225 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 226 #define EQOS_DESCRIPTORS_TX 4 227 #define EQOS_DESCRIPTORS_RX 4 228 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 229 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 230 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 231 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 232 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 233 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 234 235 /* 236 * Warn if the cache-line size is larger than the descriptor size. In such 237 * cases the driver will likely fail because the CPU needs to flush the cache 238 * when requeuing RX buffers, therefore descriptors written by the hardware 239 * may be discarded. Architectures with full IO coherence, such as x86, do not 240 * experience this issue, and hence are excluded from this condition. 241 * 242 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 243 * the driver to allocate descriptors from a pool of non-cached memory. 244 */ 245 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 246 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 247 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 248 #warning Cache line size is larger than descriptor size 249 #endif 250 #endif 251 252 struct eqos_desc { 253 u32 des0; 254 u32 des1; 255 u32 des2; 256 u32 des3; 257 }; 258 259 #define EQOS_DESC3_OWN BIT(31) 260 #define EQOS_DESC3_FD BIT(29) 261 #define EQOS_DESC3_LD BIT(28) 262 #define EQOS_DESC3_BUF1V BIT(24) 263 264 struct eqos_config { 265 bool reg_access_always_ok; 266 int mdio_wait; 267 int swr_wait; 268 int config_mac; 269 int config_mac_mdio; 270 phy_interface_t (*interface)(struct udevice *dev); 271 struct eqos_ops *ops; 272 }; 273 274 struct eqos_ops { 275 void (*eqos_inval_desc)(void *desc); 276 void (*eqos_flush_desc)(void *desc); 277 void (*eqos_inval_buffer)(void *buf, size_t size); 278 void (*eqos_flush_buffer)(void *buf, size_t size); 279 int (*eqos_probe_resources)(struct udevice *dev); 280 int (*eqos_remove_resources)(struct udevice *dev); 281 int (*eqos_stop_resets)(struct udevice *dev); 282 int (*eqos_start_resets)(struct udevice *dev); 283 void (*eqos_stop_clks)(struct udevice *dev); 284 int (*eqos_start_clks)(struct udevice *dev); 285 int (*eqos_calibrate_pads)(struct udevice *dev); 286 int (*eqos_disable_calibration)(struct udevice *dev); 287 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 288 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 289 }; 290 291 struct eqos_priv { 292 struct udevice *dev; 293 const struct eqos_config *config; 294 fdt_addr_t regs; 295 struct eqos_mac_regs *mac_regs; 296 struct eqos_mtl_regs *mtl_regs; 297 struct eqos_dma_regs *dma_regs; 298 struct eqos_tegra186_regs *tegra186_regs; 299 struct reset_ctl reset_ctl; 300 struct gpio_desc phy_reset_gpio; 301 struct clk clk_master_bus; 302 struct clk clk_rx; 303 struct clk clk_ptp_ref; 304 struct clk clk_tx; 305 struct clk clk_ck; 306 struct clk clk_slave_bus; 307 struct mii_dev *mii; 308 struct phy_device *phy; 309 int phyaddr; 310 u32 max_speed; 311 void *descs; 312 struct eqos_desc *tx_descs; 313 struct eqos_desc *rx_descs; 314 int tx_desc_idx, rx_desc_idx; 315 void *tx_dma_buf; 316 void *rx_dma_buf; 317 void *rx_pkt; 318 bool started; 319 bool reg_access_ok; 320 }; 321 322 /* 323 * TX and RX descriptors are 16 bytes. This causes problems with the cache 324 * maintenance on CPUs where the cache-line size exceeds the size of these 325 * descriptors. What will happen is that when the driver receives a packet 326 * it will be immediately requeued for the hardware to reuse. The CPU will 327 * therefore need to flush the cache-line containing the descriptor, which 328 * will cause all other descriptors in the same cache-line to be flushed 329 * along with it. If one of those descriptors had been written to by the 330 * device those changes (and the associated packet) will be lost. 331 * 332 * To work around this, we make use of non-cached memory if available. If 333 * descriptors are mapped uncached there's no need to manually flush them 334 * or invalidate them. 335 * 336 * Note that this only applies to descriptors. The packet data buffers do 337 * not have the same constraints since they are 1536 bytes large, so they 338 * are unlikely to share cache-lines. 339 */ 340 static void *eqos_alloc_descs(unsigned int num) 341 { 342 #ifdef CONFIG_SYS_NONCACHED_MEMORY 343 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 344 EQOS_DESCRIPTOR_ALIGN); 345 #else 346 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 347 #endif 348 } 349 350 static void eqos_free_descs(void *descs) 351 { 352 #ifdef CONFIG_SYS_NONCACHED_MEMORY 353 /* FIXME: noncached_alloc() has no opposite */ 354 #else 355 free(descs); 356 #endif 357 } 358 359 static void eqos_inval_desc_tegra186(void *desc) 360 { 361 #ifndef CONFIG_SYS_NONCACHED_MEMORY 362 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 363 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 364 ARCH_DMA_MINALIGN); 365 366 invalidate_dcache_range(start, end); 367 #endif 368 } 369 370 static void eqos_inval_desc_stm32(void *desc) 371 { 372 #ifndef CONFIG_SYS_NONCACHED_MEMORY 373 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 374 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 375 ARCH_DMA_MINALIGN); 376 377 invalidate_dcache_range(start, end); 378 #endif 379 } 380 381 static void eqos_flush_desc_tegra186(void *desc) 382 { 383 #ifndef CONFIG_SYS_NONCACHED_MEMORY 384 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 385 #endif 386 } 387 388 static void eqos_flush_desc_stm32(void *desc) 389 { 390 #ifndef CONFIG_SYS_NONCACHED_MEMORY 391 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 392 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 393 ARCH_DMA_MINALIGN); 394 395 flush_dcache_range(start, end); 396 #endif 397 } 398 399 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 400 { 401 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 402 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 403 404 invalidate_dcache_range(start, end); 405 } 406 407 static void eqos_inval_buffer_stm32(void *buf, size_t size) 408 { 409 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 410 unsigned long end = roundup((unsigned long)buf + size, 411 ARCH_DMA_MINALIGN); 412 413 invalidate_dcache_range(start, end); 414 } 415 416 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 417 { 418 flush_cache((unsigned long)buf, size); 419 } 420 421 static void eqos_flush_buffer_stm32(void *buf, size_t size) 422 { 423 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 424 unsigned long end = roundup((unsigned long)buf + size, 425 ARCH_DMA_MINALIGN); 426 427 flush_dcache_range(start, end); 428 } 429 430 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 431 { 432 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 433 EQOS_MAC_MDIO_ADDRESS_GB, false, 434 1000000, true); 435 } 436 437 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 438 int mdio_reg) 439 { 440 struct eqos_priv *eqos = bus->priv; 441 u32 val; 442 int ret; 443 444 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 445 mdio_reg); 446 447 ret = eqos_mdio_wait_idle(eqos); 448 if (ret) { 449 pr_err("MDIO not idle at entry"); 450 return ret; 451 } 452 453 val = readl(&eqos->mac_regs->mdio_address); 454 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 455 EQOS_MAC_MDIO_ADDRESS_C45E; 456 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 457 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 458 (eqos->config->config_mac_mdio << 459 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 460 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 461 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 462 EQOS_MAC_MDIO_ADDRESS_GB; 463 writel(val, &eqos->mac_regs->mdio_address); 464 465 udelay(eqos->config->mdio_wait); 466 467 ret = eqos_mdio_wait_idle(eqos); 468 if (ret) { 469 pr_err("MDIO read didn't complete"); 470 return ret; 471 } 472 473 val = readl(&eqos->mac_regs->mdio_data); 474 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 475 476 debug("%s: val=%x\n", __func__, val); 477 478 return val; 479 } 480 481 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 482 int mdio_reg, u16 mdio_val) 483 { 484 struct eqos_priv *eqos = bus->priv; 485 u32 val; 486 int ret; 487 488 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 489 mdio_addr, mdio_reg, mdio_val); 490 491 ret = eqos_mdio_wait_idle(eqos); 492 if (ret) { 493 pr_err("MDIO not idle at entry"); 494 return ret; 495 } 496 497 writel(mdio_val, &eqos->mac_regs->mdio_data); 498 499 val = readl(&eqos->mac_regs->mdio_address); 500 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 501 EQOS_MAC_MDIO_ADDRESS_C45E; 502 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 503 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 504 (eqos->config->config_mac_mdio << 505 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 506 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 507 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 508 EQOS_MAC_MDIO_ADDRESS_GB; 509 writel(val, &eqos->mac_regs->mdio_address); 510 511 udelay(eqos->config->mdio_wait); 512 513 ret = eqos_mdio_wait_idle(eqos); 514 if (ret) { 515 pr_err("MDIO read didn't complete"); 516 return ret; 517 } 518 519 return 0; 520 } 521 522 static int eqos_start_clks_tegra186(struct udevice *dev) 523 { 524 struct eqos_priv *eqos = dev_get_priv(dev); 525 int ret; 526 527 debug("%s(dev=%p):\n", __func__, dev); 528 529 ret = clk_enable(&eqos->clk_slave_bus); 530 if (ret < 0) { 531 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 532 goto err; 533 } 534 535 ret = clk_enable(&eqos->clk_master_bus); 536 if (ret < 0) { 537 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 538 goto err_disable_clk_slave_bus; 539 } 540 541 ret = clk_enable(&eqos->clk_rx); 542 if (ret < 0) { 543 pr_err("clk_enable(clk_rx) failed: %d", ret); 544 goto err_disable_clk_master_bus; 545 } 546 547 ret = clk_enable(&eqos->clk_ptp_ref); 548 if (ret < 0) { 549 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 550 goto err_disable_clk_rx; 551 } 552 553 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 554 if (ret < 0) { 555 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 556 goto err_disable_clk_ptp_ref; 557 } 558 559 ret = clk_enable(&eqos->clk_tx); 560 if (ret < 0) { 561 pr_err("clk_enable(clk_tx) failed: %d", ret); 562 goto err_disable_clk_ptp_ref; 563 } 564 565 debug("%s: OK\n", __func__); 566 return 0; 567 568 err_disable_clk_ptp_ref: 569 clk_disable(&eqos->clk_ptp_ref); 570 err_disable_clk_rx: 571 clk_disable(&eqos->clk_rx); 572 err_disable_clk_master_bus: 573 clk_disable(&eqos->clk_master_bus); 574 err_disable_clk_slave_bus: 575 clk_disable(&eqos->clk_slave_bus); 576 err: 577 debug("%s: FAILED: %d\n", __func__, ret); 578 return ret; 579 } 580 581 static int eqos_start_clks_stm32(struct udevice *dev) 582 { 583 struct eqos_priv *eqos = dev_get_priv(dev); 584 int ret; 585 586 debug("%s(dev=%p):\n", __func__, dev); 587 588 ret = clk_enable(&eqos->clk_master_bus); 589 if (ret < 0) { 590 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 591 goto err; 592 } 593 594 ret = clk_enable(&eqos->clk_rx); 595 if (ret < 0) { 596 pr_err("clk_enable(clk_rx) failed: %d", ret); 597 goto err_disable_clk_master_bus; 598 } 599 600 ret = clk_enable(&eqos->clk_tx); 601 if (ret < 0) { 602 pr_err("clk_enable(clk_tx) failed: %d", ret); 603 goto err_disable_clk_rx; 604 } 605 606 if (clk_valid(&eqos->clk_ck)) { 607 ret = clk_enable(&eqos->clk_ck); 608 if (ret < 0) { 609 pr_err("clk_enable(clk_ck) failed: %d", ret); 610 goto err_disable_clk_tx; 611 } 612 } 613 614 debug("%s: OK\n", __func__); 615 return 0; 616 617 err_disable_clk_tx: 618 clk_disable(&eqos->clk_tx); 619 err_disable_clk_rx: 620 clk_disable(&eqos->clk_rx); 621 err_disable_clk_master_bus: 622 clk_disable(&eqos->clk_master_bus); 623 err: 624 debug("%s: FAILED: %d\n", __func__, ret); 625 return ret; 626 } 627 628 static void eqos_stop_clks_tegra186(struct udevice *dev) 629 { 630 struct eqos_priv *eqos = dev_get_priv(dev); 631 632 debug("%s(dev=%p):\n", __func__, dev); 633 634 clk_disable(&eqos->clk_tx); 635 clk_disable(&eqos->clk_ptp_ref); 636 clk_disable(&eqos->clk_rx); 637 clk_disable(&eqos->clk_master_bus); 638 clk_disable(&eqos->clk_slave_bus); 639 640 debug("%s: OK\n", __func__); 641 } 642 643 static void eqos_stop_clks_stm32(struct udevice *dev) 644 { 645 struct eqos_priv *eqos = dev_get_priv(dev); 646 647 debug("%s(dev=%p):\n", __func__, dev); 648 649 clk_disable(&eqos->clk_tx); 650 clk_disable(&eqos->clk_rx); 651 clk_disable(&eqos->clk_master_bus); 652 if (clk_valid(&eqos->clk_ck)) 653 clk_disable(&eqos->clk_ck); 654 655 debug("%s: OK\n", __func__); 656 } 657 658 static int eqos_start_resets_tegra186(struct udevice *dev) 659 { 660 struct eqos_priv *eqos = dev_get_priv(dev); 661 int ret; 662 663 debug("%s(dev=%p):\n", __func__, dev); 664 665 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 666 if (ret < 0) { 667 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 668 return ret; 669 } 670 671 udelay(2); 672 673 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 674 if (ret < 0) { 675 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 676 return ret; 677 } 678 679 ret = reset_assert(&eqos->reset_ctl); 680 if (ret < 0) { 681 pr_err("reset_assert() failed: %d", ret); 682 return ret; 683 } 684 685 udelay(2); 686 687 ret = reset_deassert(&eqos->reset_ctl); 688 if (ret < 0) { 689 pr_err("reset_deassert() failed: %d", ret); 690 return ret; 691 } 692 693 debug("%s: OK\n", __func__); 694 return 0; 695 } 696 697 static int eqos_start_resets_stm32(struct udevice *dev) 698 { 699 struct eqos_priv *eqos = dev_get_priv(dev); 700 int ret; 701 702 debug("%s(dev=%p):\n", __func__, dev); 703 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 704 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 705 if (ret < 0) { 706 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 707 ret); 708 return ret; 709 } 710 711 udelay(2); 712 713 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 714 if (ret < 0) { 715 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 716 ret); 717 return ret; 718 } 719 } 720 debug("%s: OK\n", __func__); 721 722 return 0; 723 } 724 725 static int eqos_stop_resets_tegra186(struct udevice *dev) 726 { 727 struct eqos_priv *eqos = dev_get_priv(dev); 728 729 reset_assert(&eqos->reset_ctl); 730 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 731 732 return 0; 733 } 734 735 static int eqos_stop_resets_stm32(struct udevice *dev) 736 { 737 struct eqos_priv *eqos = dev_get_priv(dev); 738 int ret; 739 740 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 741 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 742 if (ret < 0) { 743 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 744 ret); 745 return ret; 746 } 747 } 748 749 return 0; 750 } 751 752 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 753 { 754 struct eqos_priv *eqos = dev_get_priv(dev); 755 int ret; 756 757 debug("%s(dev=%p):\n", __func__, dev); 758 759 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 760 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 761 762 udelay(1); 763 764 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 765 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 766 767 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 768 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 769 if (ret) { 770 pr_err("calibrate didn't start"); 771 goto failed; 772 } 773 774 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 775 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 776 if (ret) { 777 pr_err("calibrate didn't finish"); 778 goto failed; 779 } 780 781 ret = 0; 782 783 failed: 784 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 785 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 786 787 debug("%s: returns %d\n", __func__, ret); 788 789 return ret; 790 } 791 792 static int eqos_disable_calibration_tegra186(struct udevice *dev) 793 { 794 struct eqos_priv *eqos = dev_get_priv(dev); 795 796 debug("%s(dev=%p):\n", __func__, dev); 797 798 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 799 EQOS_AUTO_CAL_CONFIG_ENABLE); 800 801 return 0; 802 } 803 804 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 805 { 806 struct eqos_priv *eqos = dev_get_priv(dev); 807 808 return clk_get_rate(&eqos->clk_slave_bus); 809 } 810 811 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 812 { 813 struct eqos_priv *eqos = dev_get_priv(dev); 814 815 return clk_get_rate(&eqos->clk_master_bus); 816 } 817 818 static int eqos_calibrate_pads_stm32(struct udevice *dev) 819 { 820 return 0; 821 } 822 823 static int eqos_disable_calibration_stm32(struct udevice *dev) 824 { 825 return 0; 826 } 827 828 static int eqos_set_full_duplex(struct udevice *dev) 829 { 830 struct eqos_priv *eqos = dev_get_priv(dev); 831 832 debug("%s(dev=%p):\n", __func__, dev); 833 834 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 835 836 return 0; 837 } 838 839 static int eqos_set_half_duplex(struct udevice *dev) 840 { 841 struct eqos_priv *eqos = dev_get_priv(dev); 842 843 debug("%s(dev=%p):\n", __func__, dev); 844 845 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 846 847 /* WAR: Flush TX queue when switching to half-duplex */ 848 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 849 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 850 851 return 0; 852 } 853 854 static int eqos_set_gmii_speed(struct udevice *dev) 855 { 856 struct eqos_priv *eqos = dev_get_priv(dev); 857 858 debug("%s(dev=%p):\n", __func__, dev); 859 860 clrbits_le32(&eqos->mac_regs->configuration, 861 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 862 863 return 0; 864 } 865 866 static int eqos_set_mii_speed_100(struct udevice *dev) 867 { 868 struct eqos_priv *eqos = dev_get_priv(dev); 869 870 debug("%s(dev=%p):\n", __func__, dev); 871 872 setbits_le32(&eqos->mac_regs->configuration, 873 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 874 875 return 0; 876 } 877 878 static int eqos_set_mii_speed_10(struct udevice *dev) 879 { 880 struct eqos_priv *eqos = dev_get_priv(dev); 881 882 debug("%s(dev=%p):\n", __func__, dev); 883 884 clrsetbits_le32(&eqos->mac_regs->configuration, 885 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 886 887 return 0; 888 } 889 890 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 891 { 892 struct eqos_priv *eqos = dev_get_priv(dev); 893 ulong rate; 894 int ret; 895 896 debug("%s(dev=%p):\n", __func__, dev); 897 898 switch (eqos->phy->speed) { 899 case SPEED_1000: 900 rate = 125 * 1000 * 1000; 901 break; 902 case SPEED_100: 903 rate = 25 * 1000 * 1000; 904 break; 905 case SPEED_10: 906 rate = 2.5 * 1000 * 1000; 907 break; 908 default: 909 pr_err("invalid speed %d", eqos->phy->speed); 910 return -EINVAL; 911 } 912 913 ret = clk_set_rate(&eqos->clk_tx, rate); 914 if (ret < 0) { 915 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 916 return ret; 917 } 918 919 return 0; 920 } 921 922 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 923 { 924 return 0; 925 } 926 927 static int eqos_adjust_link(struct udevice *dev) 928 { 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 int ret; 931 bool en_calibration; 932 933 debug("%s(dev=%p):\n", __func__, dev); 934 935 if (eqos->phy->duplex) 936 ret = eqos_set_full_duplex(dev); 937 else 938 ret = eqos_set_half_duplex(dev); 939 if (ret < 0) { 940 pr_err("eqos_set_*_duplex() failed: %d", ret); 941 return ret; 942 } 943 944 switch (eqos->phy->speed) { 945 case SPEED_1000: 946 en_calibration = true; 947 ret = eqos_set_gmii_speed(dev); 948 break; 949 case SPEED_100: 950 en_calibration = true; 951 ret = eqos_set_mii_speed_100(dev); 952 break; 953 case SPEED_10: 954 en_calibration = false; 955 ret = eqos_set_mii_speed_10(dev); 956 break; 957 default: 958 pr_err("invalid speed %d", eqos->phy->speed); 959 return -EINVAL; 960 } 961 if (ret < 0) { 962 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 963 return ret; 964 } 965 966 if (en_calibration) { 967 ret = eqos->config->ops->eqos_calibrate_pads(dev); 968 if (ret < 0) { 969 pr_err("eqos_calibrate_pads() failed: %d", 970 ret); 971 return ret; 972 } 973 } else { 974 ret = eqos->config->ops->eqos_disable_calibration(dev); 975 if (ret < 0) { 976 pr_err("eqos_disable_calibration() failed: %d", 977 ret); 978 return ret; 979 } 980 } 981 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 982 if (ret < 0) { 983 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 984 return ret; 985 } 986 987 return 0; 988 } 989 990 static int eqos_write_hwaddr(struct udevice *dev) 991 { 992 struct eth_pdata *plat = dev_get_platdata(dev); 993 struct eqos_priv *eqos = dev_get_priv(dev); 994 uint32_t val; 995 996 /* 997 * This function may be called before start() or after stop(). At that 998 * time, on at least some configurations of the EQoS HW, all clocks to 999 * the EQoS HW block will be stopped, and a reset signal applied. If 1000 * any register access is attempted in this state, bus timeouts or CPU 1001 * hangs may occur. This check prevents that. 1002 * 1003 * A simple solution to this problem would be to not implement 1004 * write_hwaddr(), since start() always writes the MAC address into HW 1005 * anyway. However, it is desirable to implement write_hwaddr() to 1006 * support the case of SW that runs subsequent to U-Boot which expects 1007 * the MAC address to already be programmed into the EQoS registers, 1008 * which must happen irrespective of whether the U-Boot user (or 1009 * scripts) actually made use of the EQoS device, and hence 1010 * irrespective of whether start() was ever called. 1011 * 1012 * Note that this requirement by subsequent SW is not valid for 1013 * Tegra186, and is likely not valid for any non-PCI instantiation of 1014 * the EQoS HW block. This function is implemented solely as 1015 * future-proofing with the expectation the driver will eventually be 1016 * ported to some system where the expectation above is true. 1017 */ 1018 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1019 return 0; 1020 1021 /* Update the MAC address */ 1022 val = (plat->enetaddr[5] << 8) | 1023 (plat->enetaddr[4]); 1024 writel(val, &eqos->mac_regs->address0_high); 1025 val = (plat->enetaddr[3] << 24) | 1026 (plat->enetaddr[2] << 16) | 1027 (plat->enetaddr[1] << 8) | 1028 (plat->enetaddr[0]); 1029 writel(val, &eqos->mac_regs->address0_low); 1030 1031 return 0; 1032 } 1033 1034 static int eqos_start(struct udevice *dev) 1035 { 1036 struct eqos_priv *eqos = dev_get_priv(dev); 1037 int ret, i; 1038 ulong rate; 1039 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1040 ulong last_rx_desc; 1041 1042 debug("%s(dev=%p):\n", __func__, dev); 1043 1044 eqos->tx_desc_idx = 0; 1045 eqos->rx_desc_idx = 0; 1046 1047 ret = eqos->config->ops->eqos_start_clks(dev); 1048 if (ret < 0) { 1049 pr_err("eqos_start_clks() failed: %d", ret); 1050 goto err; 1051 } 1052 1053 ret = eqos->config->ops->eqos_start_resets(dev); 1054 if (ret < 0) { 1055 pr_err("eqos_start_resets() failed: %d", ret); 1056 goto err_stop_clks; 1057 } 1058 1059 udelay(10); 1060 1061 eqos->reg_access_ok = true; 1062 1063 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1064 EQOS_DMA_MODE_SWR, false, 1065 eqos->config->swr_wait, false); 1066 if (ret) { 1067 pr_err("EQOS_DMA_MODE_SWR stuck"); 1068 goto err_stop_resets; 1069 } 1070 1071 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1072 if (ret < 0) { 1073 pr_err("eqos_calibrate_pads() failed: %d", ret); 1074 goto err_stop_resets; 1075 } 1076 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1077 1078 val = (rate / 1000000) - 1; 1079 writel(val, &eqos->mac_regs->us_tic_counter); 1080 1081 /* 1082 * if PHY was already connected and configured, 1083 * don't need to reconnect/reconfigure again 1084 */ 1085 if (!eqos->phy) { 1086 int addr = -1; 1087 #ifdef CONFIG_DM_ETH_PHY 1088 addr = eth_phy_get_addr(dev); 1089 #endif 1090 #ifdef DWC_NET_PHYADDR 1091 addr = DWC_NET_PHYADDR; 1092 #endif 1093 eqos->phy = phy_connect(eqos->mii, addr, dev, 1094 eqos->config->interface(dev)); 1095 if (!eqos->phy) { 1096 pr_err("phy_connect() failed"); 1097 goto err_stop_resets; 1098 } 1099 1100 if (eqos->max_speed) { 1101 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1102 if (ret) { 1103 pr_err("phy_set_supported() failed: %d", ret); 1104 goto err_shutdown_phy; 1105 } 1106 } 1107 1108 ret = phy_config(eqos->phy); 1109 if (ret < 0) { 1110 pr_err("phy_config() failed: %d", ret); 1111 goto err_shutdown_phy; 1112 } 1113 } 1114 1115 ret = phy_startup(eqos->phy); 1116 if (ret < 0) { 1117 pr_err("phy_startup() failed: %d", ret); 1118 goto err_shutdown_phy; 1119 } 1120 1121 if (!eqos->phy->link) { 1122 pr_err("No link"); 1123 goto err_shutdown_phy; 1124 } 1125 1126 ret = eqos_adjust_link(dev); 1127 if (ret < 0) { 1128 pr_err("eqos_adjust_link() failed: %d", ret); 1129 goto err_shutdown_phy; 1130 } 1131 1132 /* Configure MTL */ 1133 1134 /* Enable Store and Forward mode for TX */ 1135 /* Program Tx operating mode */ 1136 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1137 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1138 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1139 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1140 1141 /* Transmit Queue weight */ 1142 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1143 1144 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1145 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1146 EQOS_MTL_RXQ0_OPERATION_MODE_RSF); 1147 1148 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1149 val = readl(&eqos->mac_regs->hw_feature1); 1150 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1151 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1152 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1153 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1154 1155 /* 1156 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1157 * r/tqs is encoded as (n / 256) - 1. 1158 */ 1159 tqs = (128 << tx_fifo_sz) / 256 - 1; 1160 rqs = (128 << rx_fifo_sz) / 256 - 1; 1161 1162 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1163 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1164 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1165 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1166 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1167 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1168 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1169 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1170 1171 /* Flow control used only if each channel gets 4KB or more FIFO */ 1172 if (rqs >= ((4096 / 256) - 1)) { 1173 u32 rfd, rfa; 1174 1175 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1176 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1177 1178 /* 1179 * Set Threshold for Activating Flow Contol space for min 2 1180 * frames ie, (1500 * 1) = 1500 bytes. 1181 * 1182 * Set Threshold for Deactivating Flow Contol for space of 1183 * min 1 frame (frame size 1500bytes) in receive fifo 1184 */ 1185 if (rqs == ((4096 / 256) - 1)) { 1186 /* 1187 * This violates the above formula because of FIFO size 1188 * limit therefore overflow may occur inspite of this. 1189 */ 1190 rfd = 0x3; /* Full-3K */ 1191 rfa = 0x1; /* Full-1.5K */ 1192 } else if (rqs == ((8192 / 256) - 1)) { 1193 rfd = 0x6; /* Full-4K */ 1194 rfa = 0xa; /* Full-6K */ 1195 } else if (rqs == ((16384 / 256) - 1)) { 1196 rfd = 0x6; /* Full-4K */ 1197 rfa = 0x12; /* Full-10K */ 1198 } else { 1199 rfd = 0x6; /* Full-4K */ 1200 rfa = 0x1E; /* Full-16K */ 1201 } 1202 1203 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1204 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1205 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1206 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1207 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1208 (rfd << 1209 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1210 (rfa << 1211 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1212 } 1213 1214 /* Configure MAC */ 1215 1216 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1217 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1218 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1219 eqos->config->config_mac << 1220 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1221 1222 /* Set TX flow control parameters */ 1223 /* Set Pause Time */ 1224 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1225 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1226 /* Assign priority for TX flow control */ 1227 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1228 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1229 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1230 /* Assign priority for RX flow control */ 1231 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1232 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1233 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1234 /* Enable flow control */ 1235 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1236 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1237 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1238 EQOS_MAC_RX_FLOW_CTRL_RFE); 1239 1240 clrsetbits_le32(&eqos->mac_regs->configuration, 1241 EQOS_MAC_CONFIGURATION_GPSLCE | 1242 EQOS_MAC_CONFIGURATION_WD | 1243 EQOS_MAC_CONFIGURATION_JD | 1244 EQOS_MAC_CONFIGURATION_JE, 1245 EQOS_MAC_CONFIGURATION_CST | 1246 EQOS_MAC_CONFIGURATION_ACS); 1247 1248 eqos_write_hwaddr(dev); 1249 1250 /* Configure DMA */ 1251 1252 /* Enable OSP mode */ 1253 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1254 EQOS_DMA_CH0_TX_CONTROL_OSP); 1255 1256 /* RX buffer size. Must be a multiple of bus width */ 1257 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1258 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1259 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1260 EQOS_MAX_PACKET_SIZE << 1261 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1262 1263 setbits_le32(&eqos->dma_regs->ch0_control, 1264 EQOS_DMA_CH0_CONTROL_PBLX8); 1265 1266 /* 1267 * Burst length must be < 1/2 FIFO size. 1268 * FIFO size in tqs is encoded as (n / 256) - 1. 1269 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1270 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 1271 */ 1272 pbl = tqs + 1; 1273 if (pbl > 32) 1274 pbl = 32; 1275 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1276 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1277 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1278 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1279 1280 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1281 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1282 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1283 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1284 1285 /* DMA performance configuration */ 1286 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1287 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1288 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1289 writel(val, &eqos->dma_regs->sysbus_mode); 1290 1291 /* Set up descriptors */ 1292 1293 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1294 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1295 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1296 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1297 (i * EQOS_MAX_PACKET_SIZE)); 1298 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1299 eqos->config->ops->eqos_flush_desc(rx_desc); 1300 } 1301 1302 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1303 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1304 writel(EQOS_DESCRIPTORS_TX - 1, 1305 &eqos->dma_regs->ch0_txdesc_ring_length); 1306 1307 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1308 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1309 writel(EQOS_DESCRIPTORS_RX - 1, 1310 &eqos->dma_regs->ch0_rxdesc_ring_length); 1311 1312 /* Enable everything */ 1313 1314 setbits_le32(&eqos->mac_regs->configuration, 1315 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1316 1317 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1318 EQOS_DMA_CH0_TX_CONTROL_ST); 1319 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1320 EQOS_DMA_CH0_RX_CONTROL_SR); 1321 1322 /* TX tail pointer not written until we need to TX a packet */ 1323 /* 1324 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1325 * first descriptor, implying all descriptors were available. However, 1326 * that's not distinguishable from none of the descriptors being 1327 * available. 1328 */ 1329 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1330 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1331 1332 eqos->started = true; 1333 1334 debug("%s: OK\n", __func__); 1335 return 0; 1336 1337 err_shutdown_phy: 1338 phy_shutdown(eqos->phy); 1339 err_stop_resets: 1340 eqos->config->ops->eqos_stop_resets(dev); 1341 err_stop_clks: 1342 eqos->config->ops->eqos_stop_clks(dev); 1343 err: 1344 pr_err("FAILED: %d", ret); 1345 return ret; 1346 } 1347 1348 static void eqos_stop(struct udevice *dev) 1349 { 1350 struct eqos_priv *eqos = dev_get_priv(dev); 1351 int i; 1352 1353 debug("%s(dev=%p):\n", __func__, dev); 1354 1355 if (!eqos->started) 1356 return; 1357 eqos->started = false; 1358 eqos->reg_access_ok = false; 1359 1360 /* Disable TX DMA */ 1361 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1362 EQOS_DMA_CH0_TX_CONTROL_ST); 1363 1364 /* Wait for TX all packets to drain out of MTL */ 1365 for (i = 0; i < 1000000; i++) { 1366 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1367 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1368 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1369 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1370 if ((trcsts != 1) && (!txqsts)) 1371 break; 1372 } 1373 1374 /* Turn off MAC TX and RX */ 1375 clrbits_le32(&eqos->mac_regs->configuration, 1376 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1377 1378 /* Wait for all RX packets to drain out of MTL */ 1379 for (i = 0; i < 1000000; i++) { 1380 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1381 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1382 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1383 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1384 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1385 if ((!prxq) && (!rxqsts)) 1386 break; 1387 } 1388 1389 /* Turn off RX DMA */ 1390 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1391 EQOS_DMA_CH0_RX_CONTROL_SR); 1392 1393 if (eqos->phy) { 1394 phy_shutdown(eqos->phy); 1395 } 1396 eqos->config->ops->eqos_stop_resets(dev); 1397 eqos->config->ops->eqos_stop_clks(dev); 1398 1399 debug("%s: OK\n", __func__); 1400 } 1401 1402 static int eqos_send(struct udevice *dev, void *packet, int length) 1403 { 1404 struct eqos_priv *eqos = dev_get_priv(dev); 1405 struct eqos_desc *tx_desc; 1406 int i; 1407 1408 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1409 length); 1410 1411 memcpy(eqos->tx_dma_buf, packet, length); 1412 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1413 1414 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1415 eqos->tx_desc_idx++; 1416 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1417 1418 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1419 tx_desc->des1 = 0; 1420 tx_desc->des2 = length; 1421 /* 1422 * Make sure that if HW sees the _OWN write below, it will see all the 1423 * writes to the rest of the descriptor too. 1424 */ 1425 mb(); 1426 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1427 eqos->config->ops->eqos_flush_desc(tx_desc); 1428 1429 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1430 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1431 1432 for (i = 0; i < 1000000; i++) { 1433 eqos->config->ops->eqos_inval_desc(tx_desc); 1434 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1435 return 0; 1436 udelay(1); 1437 } 1438 1439 debug("%s: TX timeout\n", __func__); 1440 1441 return -ETIMEDOUT; 1442 } 1443 1444 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1445 { 1446 struct eqos_priv *eqos = dev_get_priv(dev); 1447 struct eqos_desc *rx_desc; 1448 int length; 1449 1450 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1451 1452 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1453 eqos->config->ops->eqos_inval_desc(rx_desc); 1454 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1455 debug("%s: RX packet not available\n", __func__); 1456 return -EAGAIN; 1457 } 1458 1459 *packetp = eqos->rx_dma_buf + 1460 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1461 length = rx_desc->des3 & 0x7fff; 1462 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1463 1464 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1465 1466 return length; 1467 } 1468 1469 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1470 { 1471 struct eqos_priv *eqos = dev_get_priv(dev); 1472 uchar *packet_expected; 1473 struct eqos_desc *rx_desc; 1474 1475 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1476 1477 packet_expected = eqos->rx_dma_buf + 1478 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1479 if (packet != packet_expected) { 1480 debug("%s: Unexpected packet (expected %p)\n", __func__, 1481 packet_expected); 1482 return -EINVAL; 1483 } 1484 1485 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1486 1487 rx_desc->des0 = 0; 1488 mb(); 1489 eqos->config->ops->eqos_flush_desc(rx_desc); 1490 eqos->config->ops->eqos_inval_buffer(packet, length); 1491 rx_desc->des0 = (u32)(ulong)packet; 1492 rx_desc->des1 = 0; 1493 rx_desc->des2 = 0; 1494 /* 1495 * Make sure that if HW sees the _OWN write below, it will see all the 1496 * writes to the rest of the descriptor too. 1497 */ 1498 mb(); 1499 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1500 eqos->config->ops->eqos_flush_desc(rx_desc); 1501 1502 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1503 1504 eqos->rx_desc_idx++; 1505 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1506 1507 return 0; 1508 } 1509 1510 static int eqos_probe_resources_core(struct udevice *dev) 1511 { 1512 struct eqos_priv *eqos = dev_get_priv(dev); 1513 int ret; 1514 1515 debug("%s(dev=%p):\n", __func__, dev); 1516 1517 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1518 EQOS_DESCRIPTORS_RX); 1519 if (!eqos->descs) { 1520 debug("%s: eqos_alloc_descs() failed\n", __func__); 1521 ret = -ENOMEM; 1522 goto err; 1523 } 1524 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1525 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1526 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1527 eqos->rx_descs); 1528 1529 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1530 if (!eqos->tx_dma_buf) { 1531 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1532 ret = -ENOMEM; 1533 goto err_free_descs; 1534 } 1535 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1536 1537 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1538 if (!eqos->rx_dma_buf) { 1539 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1540 ret = -ENOMEM; 1541 goto err_free_tx_dma_buf; 1542 } 1543 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1544 1545 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1546 if (!eqos->rx_pkt) { 1547 debug("%s: malloc(rx_pkt) failed\n", __func__); 1548 ret = -ENOMEM; 1549 goto err_free_rx_dma_buf; 1550 } 1551 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1552 1553 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1554 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1555 1556 debug("%s: OK\n", __func__); 1557 return 0; 1558 1559 err_free_rx_dma_buf: 1560 free(eqos->rx_dma_buf); 1561 err_free_tx_dma_buf: 1562 free(eqos->tx_dma_buf); 1563 err_free_descs: 1564 eqos_free_descs(eqos->descs); 1565 err: 1566 1567 debug("%s: returns %d\n", __func__, ret); 1568 return ret; 1569 } 1570 1571 static int eqos_remove_resources_core(struct udevice *dev) 1572 { 1573 struct eqos_priv *eqos = dev_get_priv(dev); 1574 1575 debug("%s(dev=%p):\n", __func__, dev); 1576 1577 free(eqos->rx_pkt); 1578 free(eqos->rx_dma_buf); 1579 free(eqos->tx_dma_buf); 1580 eqos_free_descs(eqos->descs); 1581 1582 debug("%s: OK\n", __func__); 1583 return 0; 1584 } 1585 1586 static int eqos_probe_resources_tegra186(struct udevice *dev) 1587 { 1588 struct eqos_priv *eqos = dev_get_priv(dev); 1589 int ret; 1590 1591 debug("%s(dev=%p):\n", __func__, dev); 1592 1593 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1594 if (ret) { 1595 pr_err("reset_get_by_name(rst) failed: %d", ret); 1596 return ret; 1597 } 1598 1599 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1600 &eqos->phy_reset_gpio, 1601 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1602 if (ret) { 1603 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1604 goto err_free_reset_eqos; 1605 } 1606 1607 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1608 if (ret) { 1609 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1610 goto err_free_gpio_phy_reset; 1611 } 1612 1613 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1614 if (ret) { 1615 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1616 goto err_free_clk_slave_bus; 1617 } 1618 1619 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1620 if (ret) { 1621 pr_err("clk_get_by_name(rx) failed: %d", ret); 1622 goto err_free_clk_master_bus; 1623 } 1624 1625 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1626 if (ret) { 1627 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1628 goto err_free_clk_rx; 1629 return ret; 1630 } 1631 1632 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1633 if (ret) { 1634 pr_err("clk_get_by_name(tx) failed: %d", ret); 1635 goto err_free_clk_ptp_ref; 1636 } 1637 1638 debug("%s: OK\n", __func__); 1639 return 0; 1640 1641 err_free_clk_ptp_ref: 1642 clk_free(&eqos->clk_ptp_ref); 1643 err_free_clk_rx: 1644 clk_free(&eqos->clk_rx); 1645 err_free_clk_master_bus: 1646 clk_free(&eqos->clk_master_bus); 1647 err_free_clk_slave_bus: 1648 clk_free(&eqos->clk_slave_bus); 1649 err_free_gpio_phy_reset: 1650 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1651 err_free_reset_eqos: 1652 reset_free(&eqos->reset_ctl); 1653 1654 debug("%s: returns %d\n", __func__, ret); 1655 return ret; 1656 } 1657 1658 /* board-specific Ethernet Interface initializations. */ 1659 __weak int board_interface_eth_init(struct udevice *dev, 1660 phy_interface_t interface_type) 1661 { 1662 return 0; 1663 } 1664 1665 static int eqos_probe_resources_stm32(struct udevice *dev) 1666 { 1667 struct eqos_priv *eqos = dev_get_priv(dev); 1668 int ret; 1669 phy_interface_t interface; 1670 struct ofnode_phandle_args phandle_args; 1671 1672 debug("%s(dev=%p):\n", __func__, dev); 1673 1674 interface = eqos->config->interface(dev); 1675 1676 if (interface == PHY_INTERFACE_MODE_NONE) { 1677 pr_err("Invalid PHY interface\n"); 1678 return -EINVAL; 1679 } 1680 1681 ret = board_interface_eth_init(dev, interface); 1682 if (ret) 1683 return -EINVAL; 1684 1685 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1686 1687 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1688 if (ret) { 1689 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1690 goto err_probe; 1691 } 1692 1693 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1694 if (ret) { 1695 pr_err("clk_get_by_name(rx) failed: %d", ret); 1696 goto err_free_clk_master_bus; 1697 } 1698 1699 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1700 if (ret) { 1701 pr_err("clk_get_by_name(tx) failed: %d", ret); 1702 goto err_free_clk_rx; 1703 } 1704 1705 /* Get ETH_CLK clocks (optional) */ 1706 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1707 if (ret) 1708 pr_warn("No phy clock provided %d", ret); 1709 1710 eqos->phyaddr = -1; 1711 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1712 &phandle_args); 1713 if (!ret) { 1714 /* search "reset-gpios" in phy node */ 1715 ret = gpio_request_by_name_nodev(phandle_args.node, 1716 "reset-gpios", 0, 1717 &eqos->phy_reset_gpio, 1718 GPIOD_IS_OUT | 1719 GPIOD_IS_OUT_ACTIVE); 1720 if (ret) 1721 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1722 ret); 1723 1724 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1725 "reg", -1); 1726 } 1727 1728 debug("%s: OK\n", __func__); 1729 return 0; 1730 1731 err_free_clk_rx: 1732 clk_free(&eqos->clk_rx); 1733 err_free_clk_master_bus: 1734 clk_free(&eqos->clk_master_bus); 1735 err_probe: 1736 1737 debug("%s: returns %d\n", __func__, ret); 1738 return ret; 1739 } 1740 1741 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1742 { 1743 const char *phy_mode; 1744 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1745 1746 debug("%s(dev=%p):\n", __func__, dev); 1747 1748 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1749 NULL); 1750 if (phy_mode) 1751 interface = phy_get_interface_by_name(phy_mode); 1752 1753 return interface; 1754 } 1755 1756 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1757 { 1758 return PHY_INTERFACE_MODE_MII; 1759 } 1760 1761 static int eqos_remove_resources_tegra186(struct udevice *dev) 1762 { 1763 struct eqos_priv *eqos = dev_get_priv(dev); 1764 1765 debug("%s(dev=%p):\n", __func__, dev); 1766 1767 clk_free(&eqos->clk_tx); 1768 clk_free(&eqos->clk_ptp_ref); 1769 clk_free(&eqos->clk_rx); 1770 clk_free(&eqos->clk_slave_bus); 1771 clk_free(&eqos->clk_master_bus); 1772 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1773 reset_free(&eqos->reset_ctl); 1774 1775 debug("%s: OK\n", __func__); 1776 return 0; 1777 } 1778 1779 static int eqos_remove_resources_stm32(struct udevice *dev) 1780 { 1781 struct eqos_priv *eqos = dev_get_priv(dev); 1782 1783 debug("%s(dev=%p):\n", __func__, dev); 1784 1785 clk_free(&eqos->clk_tx); 1786 clk_free(&eqos->clk_rx); 1787 clk_free(&eqos->clk_master_bus); 1788 if (clk_valid(&eqos->clk_ck)) 1789 clk_free(&eqos->clk_ck); 1790 1791 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 1792 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1793 1794 debug("%s: OK\n", __func__); 1795 return 0; 1796 } 1797 1798 static int eqos_probe(struct udevice *dev) 1799 { 1800 struct eqos_priv *eqos = dev_get_priv(dev); 1801 int ret; 1802 1803 debug("%s(dev=%p):\n", __func__, dev); 1804 1805 eqos->dev = dev; 1806 eqos->config = (void *)dev_get_driver_data(dev); 1807 1808 eqos->regs = devfdt_get_addr(dev); 1809 if (eqos->regs == FDT_ADDR_T_NONE) { 1810 pr_err("devfdt_get_addr() failed"); 1811 return -ENODEV; 1812 } 1813 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 1814 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 1815 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 1816 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 1817 1818 ret = eqos_probe_resources_core(dev); 1819 if (ret < 0) { 1820 pr_err("eqos_probe_resources_core() failed: %d", ret); 1821 return ret; 1822 } 1823 1824 ret = eqos->config->ops->eqos_probe_resources(dev); 1825 if (ret < 0) { 1826 pr_err("eqos_probe_resources() failed: %d", ret); 1827 goto err_remove_resources_core; 1828 } 1829 1830 #ifdef CONFIG_DM_ETH_PHY 1831 eqos->mii = eth_phy_get_mdio_bus(dev); 1832 #endif 1833 if (!eqos->mii) { 1834 eqos->mii = mdio_alloc(); 1835 if (!eqos->mii) { 1836 pr_err("mdio_alloc() failed"); 1837 ret = -ENOMEM; 1838 goto err_remove_resources_tegra; 1839 } 1840 eqos->mii->read = eqos_mdio_read; 1841 eqos->mii->write = eqos_mdio_write; 1842 eqos->mii->priv = eqos; 1843 strcpy(eqos->mii->name, dev->name); 1844 1845 ret = mdio_register(eqos->mii); 1846 if (ret < 0) { 1847 pr_err("mdio_register() failed: %d", ret); 1848 goto err_free_mdio; 1849 } 1850 } 1851 1852 #ifdef CONFIG_DM_ETH_PHY 1853 eth_phy_set_mdio_bus(dev, eqos->mii); 1854 #endif 1855 1856 debug("%s: OK\n", __func__); 1857 return 0; 1858 1859 err_free_mdio: 1860 mdio_free(eqos->mii); 1861 err_remove_resources_tegra: 1862 eqos->config->ops->eqos_remove_resources(dev); 1863 err_remove_resources_core: 1864 eqos_remove_resources_core(dev); 1865 1866 debug("%s: returns %d\n", __func__, ret); 1867 return ret; 1868 } 1869 1870 static int eqos_remove(struct udevice *dev) 1871 { 1872 struct eqos_priv *eqos = dev_get_priv(dev); 1873 1874 debug("%s(dev=%p):\n", __func__, dev); 1875 1876 mdio_unregister(eqos->mii); 1877 mdio_free(eqos->mii); 1878 eqos->config->ops->eqos_remove_resources(dev); 1879 1880 eqos_probe_resources_core(dev); 1881 1882 debug("%s: OK\n", __func__); 1883 return 0; 1884 } 1885 1886 static const struct eth_ops eqos_ops = { 1887 .start = eqos_start, 1888 .stop = eqos_stop, 1889 .send = eqos_send, 1890 .recv = eqos_recv, 1891 .free_pkt = eqos_free_pkt, 1892 .write_hwaddr = eqos_write_hwaddr, 1893 }; 1894 1895 static struct eqos_ops eqos_tegra186_ops = { 1896 .eqos_inval_desc = eqos_inval_desc_tegra186, 1897 .eqos_flush_desc = eqos_flush_desc_tegra186, 1898 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 1899 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 1900 .eqos_probe_resources = eqos_probe_resources_tegra186, 1901 .eqos_remove_resources = eqos_remove_resources_tegra186, 1902 .eqos_stop_resets = eqos_stop_resets_tegra186, 1903 .eqos_start_resets = eqos_start_resets_tegra186, 1904 .eqos_stop_clks = eqos_stop_clks_tegra186, 1905 .eqos_start_clks = eqos_start_clks_tegra186, 1906 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 1907 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 1908 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 1909 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186 1910 }; 1911 1912 static const struct eqos_config eqos_tegra186_config = { 1913 .reg_access_always_ok = false, 1914 .mdio_wait = 10, 1915 .swr_wait = 10, 1916 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 1917 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 1918 .interface = eqos_get_interface_tegra186, 1919 .ops = &eqos_tegra186_ops 1920 }; 1921 1922 static struct eqos_ops eqos_stm32_ops = { 1923 .eqos_inval_desc = eqos_inval_desc_stm32, 1924 .eqos_flush_desc = eqos_flush_desc_stm32, 1925 .eqos_inval_buffer = eqos_inval_buffer_stm32, 1926 .eqos_flush_buffer = eqos_flush_buffer_stm32, 1927 .eqos_probe_resources = eqos_probe_resources_stm32, 1928 .eqos_remove_resources = eqos_remove_resources_stm32, 1929 .eqos_stop_resets = eqos_stop_resets_stm32, 1930 .eqos_start_resets = eqos_start_resets_stm32, 1931 .eqos_stop_clks = eqos_stop_clks_stm32, 1932 .eqos_start_clks = eqos_start_clks_stm32, 1933 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 1934 .eqos_disable_calibration = eqos_disable_calibration_stm32, 1935 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 1936 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32 1937 }; 1938 1939 static const struct eqos_config eqos_stm32_config = { 1940 .reg_access_always_ok = false, 1941 .mdio_wait = 10000, 1942 .swr_wait = 50, 1943 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 1944 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 1945 .interface = eqos_get_interface_stm32, 1946 .ops = &eqos_stm32_ops 1947 }; 1948 1949 static const struct udevice_id eqos_ids[] = { 1950 { 1951 .compatible = "nvidia,tegra186-eqos", 1952 .data = (ulong)&eqos_tegra186_config 1953 }, 1954 { 1955 .compatible = "snps,dwmac-4.20a", 1956 .data = (ulong)&eqos_stm32_config 1957 }, 1958 1959 { } 1960 }; 1961 1962 U_BOOT_DRIVER(eth_eqos) = { 1963 .name = "eth_eqos", 1964 .id = UCLASS_ETH, 1965 .of_match = eqos_ids, 1966 .probe = eqos_probe, 1967 .remove = eqos_remove, 1968 .ops = &eqos_ops, 1969 .priv_auto_alloc_size = sizeof(struct eqos_priv), 1970 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 1971 }; 1972