/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys DesignWare Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 */
#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>

/* Needed for the gd->fdt_blob access in eqos_get_interface_stm32() below */
DECLARE_GLOBAL_DATA_PTR;

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};
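/*
 * Note on the layout idiom above: each unused_NNN array reserves
 * (next_offset - current_offset) / 4 32-bit words, so every named field
 * lands at the register offset given in its trailing comment. For example,
 * unused_004[(0x070 - 0x004) / 4] reserves 27 words, which places
 * q0_tx_flow_ctrl at offset 0x070 from the MAC register base.
 */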
#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
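/*
 * The CR field selects the MDC clock divider based on the CSR (register bus)
 * clock frequency: encoding 2 covers a 20-35 MHz CSR clock and encoding 5
 * covers 250-300 MHz (MDC = CSR clock / 16 and / 124 respectively, per the
 * Synopsys databook).
 */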
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif
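/*
 * As a concrete example of the problem above: with a 64-byte cache line and
 * 16-byte descriptors, four consecutive descriptors share one cache line, so
 * flushing the line that holds a just-requeued RX descriptor can overwrite
 * hardware updates to its three neighbours.
 */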
struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
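/*
 * Note that EQOS_DESCRIPTORS_SIZE rounds the combined TX+RX ring up to a
 * whole number of cache lines, so the single allocation made below can back
 * both rings whichever allocator provides it.
 */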
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_stm32(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_stm32(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_stm32(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_stm32(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}
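/*
 * For illustration: a read of register 1 (MII_BMSR) from the PHY at address
 * 0, with CR encoding 2, programs mdio_address with
 * (0 << 21) | (1 << 16) | (2 << 8) | (3 << 2) | GB; the GB (busy) bit then
 * self-clears when the transaction completes, which is what
 * eqos_mdio_wait_idle() polls for.
 */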
static int eqos_start_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}

	debug("%s: OK\n", __func__);
	return 0;

err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);

	debug("%s: OK\n", __func__);
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}
static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}
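/*
 * Taken together, the three helpers above map link speeds onto the PS and
 * FES configuration bits: PS=0/FES=0 selects GMII (1000 Mbps), PS=1/FES=1
 * selects MII at 100 Mbps, and PS=1/FES=0 selects MII at 10 Mbps.
 */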
static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2500 * 1000;	/* 2.5 MHz */
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}
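/*
 * For example, with MAC address 00:11:22:33:44:55 in enetaddr[0..5],
 * eqos_write_hwaddr() above writes 0x5544 to address0_high and 0x33221100
 * to address0_low: the hardware expects the first transmitted octet in the
 * low byte of address0_low.
 */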
static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		eqos->phy = phy_connect(eqos->mii, -1, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			ret = -ENODEV;
			goto err_stop_resets;
		}
		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
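	/*
	 * Worked example: a tx_fifo_sz encoding of 7 denotes
	 * 128 << 7 = 16384 bytes of TX FIFO, and the (n / 256) - 1 encoding
	 * then yields tqs = 63.
	 */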
	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set the threshold for deactivating flow control to leave
		 * space for at least 2 frames, i.e. (1500 * 2) = 3000 bytes.
		 *
		 * Set the threshold for activating flow control to leave
		 * space for at least 1 frame (frame size 1500 bytes) in the
		 * receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);
	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);
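	/*
	 * Worked example for the burst math above: a 16 KiB TX FIFO gives
	 * tqs = 63, so pbl would be 64; the driver caps it at 32, i.e.
	 * 32 * 8 beats * 16 bytes = 4 KiB per burst.
	 */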
	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 |= EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	}
	eqos->config->ops->eqos_flush_desc(eqos->descs);

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	writel((ulong)(tx_desc + 1), &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}
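/*
 * Descriptor ownership protocol, as used by eqos_send() above and by
 * eqos_recv()/eqos_free_pkt() below: software fills in a descriptor, issues
 * a barrier, sets the OWN bit and flushes the descriptor, then advances the
 * ring tail pointer; hardware clears OWN once it has consumed (TX) or
 * filled (RX) the associated buffer.
 */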
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	/*
	 * Invalidate the descriptor before checking ownership so that, when
	 * descriptors live in cached memory, we observe the hardware's write
	 * of the OWN bit rather than a stale cached copy.
	 */
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}
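/*
 * The U-Boot network core calls recv() to obtain a packet and, once it has
 * processed it, free_pkt(); free_pkt() below is therefore where the buffer
 * is handed back to the hardware by re-arming its descriptor.
 */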
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	/*
	 * Write des3 wholesale (rather than OR-ing into it) so that stale
	 * status bits from the previous reception are cleared.
	 */
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}
	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* board-specific Ethernet Interface initializations. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(phy reset) not provided %d",
				ret);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}
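/*
 * eqos_get_interface_stm32() relies on the standard "phy-mode" device tree
 * property in the MAC node, e.g. (illustrative snippet only, not taken from
 * any particular board):
 *
 *	&ethernet0 {
 *		phy-mode = "rgmii";
 *		phy-handle = <&phy0>;
 *	};
 */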
static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

	eqos->mii = mdio_alloc();
	if (!eqos->mii) {
		pr_err("mdio_alloc() failed");
		ret = -ENOMEM;
		goto err_remove_resources_tegra;
	}
	eqos->mii->read = eqos_mdio_read;
	eqos->mii->write = eqos_mdio_write;
	eqos->mii->priv = eqos;
	strcpy(eqos->mii->name, dev->name);

	ret = mdio_register(eqos->mii);
	if (ret < 0) {
		pr_err("mdio_register() failed: %d", ret);
		goto err_free_mdio;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources_tegra:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
};
static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_stm32,
	.eqos_flush_desc = eqos_flush_desc_stm32,
	.eqos_inval_buffer = eqos_inval_buffer_stm32,
	.eqos_flush_buffer = eqos_flush_buffer_stm32,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "snps,dwmac-4.20a",
		.data = (ulong)&eqos_stm32_config
	},

	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = eqos_ids,
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};