1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 
29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 #ifdef CONFIG_ARCH_IMX8M 45 #include <asm/arch/clock.h> 46 #include <asm/mach-imx/sys_proto.h> 47 #endif 48 49 /* Core registers */ 50 51 #define EQOS_MAC_REGS_BASE 0x000 52 struct eqos_mac_regs { 53 uint32_t configuration; /* 0x000 */ 54 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 55 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 56 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 57 uint32_t rx_flow_ctrl; /* 0x090 */ 58 uint32_t unused_094; /* 0x094 */ 59 uint32_t txq_prty_map0; /* 0x098 */ 60 uint32_t unused_09c; /* 0x09c */ 61 uint32_t rxq_ctrl0; /* 0x0a0 */ 62 uint32_t unused_0a4; /* 0x0a4 */ 63 uint32_t rxq_ctrl2; /* 0x0a8 */ 64 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 65 uint32_t us_tic_counter; /* 0x0dc */ 66 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 67 uint32_t hw_feature0; /* 0x11c */ 68 uint32_t hw_feature1; /* 0x120 */ 69 uint32_t hw_feature2; /* 0x124 */ 70 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 71 uint32_t mdio_address; /* 0x200 */ 72 uint32_t mdio_data; /* 0x204 */ 73 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 74 uint32_t address0_high; /* 0x300 */ 75 uint32_t address0_low; /* 0x304 */ 76 }; 77 78 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 79 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 80 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 81 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 82 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 83 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 84 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 85 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 86 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 87 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 88 #define 
EQOS_MAC_CONFIGURATION_TE BIT(1) 89 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 90 91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 94 95 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 96 97 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 99 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 102 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 103 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 104 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 105 106 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 107 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 108 109 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 110 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 111 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 112 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 113 114 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 115 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 116 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 117 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 118 119 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 120 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 121 122 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 123 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 124 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 125 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 126 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 127 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 128 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 129 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 130 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 131 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 132 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 133 134 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 135 136 #define EQOS_MTL_REGS_BASE 0xd00 137 struct eqos_mtl_regs { 138 uint32_t txq0_operation_mode; /* 0xd00 */ 139 uint32_t unused_d04; /* 0xd04 */ 140 uint32_t txq0_debug; /* 0xd08 */ 141 uint32_t 
unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 142 uint32_t txq0_quantum_weight; /* 0xd18 */ 143 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 144 uint32_t rxq0_operation_mode; /* 0xd30 */ 145 uint32_t unused_d34; /* 0xd34 */ 146 uint32_t rxq0_debug; /* 0xd38 */ 147 }; 148 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 152 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 153 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 154 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 155 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 156 157 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 158 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 159 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 160 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 167 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 168 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 169 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 170 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 171 172 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 173 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 174 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 175 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 176 177 #define EQOS_DMA_REGS_BASE 0x1000 178 struct eqos_dma_regs { 179 uint32_t mode; /* 0x1000 */ 180 uint32_t sysbus_mode; /* 0x1004 */ 181 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 182 uint32_t ch0_control; /* 0x1100 */ 183 uint32_t ch0_tx_control; /* 0x1104 */ 184 uint32_t ch0_rx_control; /* 0x1108 */ 185 uint32_t unused_110c; /* 0x110c */ 186 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 187 uint32_t ch0_txdesc_list_address; /* 0x1114 
*/ 188 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 189 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 190 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 191 uint32_t unused_1124; /* 0x1124 */ 192 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 193 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 194 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 195 }; 196 197 #define EQOS_DMA_MODE_SWR BIT(0) 198 199 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 200 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 201 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 202 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 203 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 204 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 205 206 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 207 208 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 209 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 210 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 211 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 212 213 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 214 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 215 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 216 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 217 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 218 219 /* These registers are Tegra186-specific */ 220 #define EQOS_TEGRA186_REGS_BASE 0x8800 221 struct eqos_tegra186_regs { 222 uint32_t sdmemcomppadctrl; /* 0x8800 */ 223 uint32_t auto_cal_config; /* 0x8804 */ 224 uint32_t unused_8808; /* 0x8808 */ 225 uint32_t auto_cal_status; /* 0x880c */ 226 }; 227 228 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 229 230 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 231 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 232 233 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 234 235 /* Descriptors */ 236 237 #define EQOS_DESCRIPTOR_WORDS 4 238 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 239 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 240 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 241 #define EQOS_DESCRIPTORS_TX 
4 242 #define EQOS_DESCRIPTORS_RX 4 243 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 244 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 245 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 246 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 247 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 248 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 249 250 /* 251 * Warn if the cache-line size is larger than the descriptor size. In such 252 * cases the driver will likely fail because the CPU needs to flush the cache 253 * when requeuing RX buffers, therefore descriptors written by the hardware 254 * may be discarded. Architectures with full IO coherence, such as x86, do not 255 * experience this issue, and hence are excluded from this condition. 256 * 257 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 258 * the driver to allocate descriptors from a pool of non-cached memory. 259 */ 260 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 261 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 262 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 263 #warning Cache line size is larger than descriptor size 264 #endif 265 #endif 266 267 struct eqos_desc { 268 u32 des0; 269 u32 des1; 270 u32 des2; 271 u32 des3; 272 }; 273 274 #define EQOS_DESC3_OWN BIT(31) 275 #define EQOS_DESC3_FD BIT(29) 276 #define EQOS_DESC3_LD BIT(28) 277 #define EQOS_DESC3_BUF1V BIT(24) 278 279 struct eqos_config { 280 bool reg_access_always_ok; 281 int mdio_wait; 282 int swr_wait; 283 int config_mac; 284 int config_mac_mdio; 285 struct eqos_ops *ops; 286 }; 287 288 struct eqos_ops { 289 void (*eqos_inval_desc)(void *desc); 290 void (*eqos_flush_desc)(void *desc); 291 void (*eqos_inval_buffer)(void *buf, size_t size); 292 void (*eqos_flush_buffer)(void *buf, size_t size); 293 int (*eqos_probe_resources)(struct udevice *dev); 294 int (*eqos_remove_resources)(struct udevice *dev); 295 int (*eqos_stop_resets)(struct 
udevice *dev); 296 int (*eqos_start_resets)(struct udevice *dev); 297 void (*eqos_stop_clks)(struct udevice *dev); 298 int (*eqos_start_clks)(struct udevice *dev); 299 int (*eqos_calibrate_pads)(struct udevice *dev); 300 int (*eqos_disable_calibration)(struct udevice *dev); 301 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 302 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 303 phy_interface_t (*eqos_get_interface)(struct udevice *dev); 304 }; 305 306 struct eqos_priv { 307 struct udevice *dev; 308 const struct eqos_config *config; 309 fdt_addr_t regs; 310 struct eqos_mac_regs *mac_regs; 311 struct eqos_mtl_regs *mtl_regs; 312 struct eqos_dma_regs *dma_regs; 313 struct eqos_tegra186_regs *tegra186_regs; 314 struct reset_ctl reset_ctl; 315 struct gpio_desc phy_reset_gpio; 316 u32 reset_delays[3]; 317 struct clk clk_master_bus; 318 struct clk clk_rx; 319 struct clk clk_ptp_ref; 320 struct clk clk_tx; 321 struct clk clk_ck; 322 struct clk clk_slave_bus; 323 struct mii_dev *mii; 324 struct phy_device *phy; 325 int phyaddr; 326 u32 max_speed; 327 void *descs; 328 struct eqos_desc *tx_descs; 329 struct eqos_desc *rx_descs; 330 int tx_desc_idx, rx_desc_idx; 331 void *tx_dma_buf; 332 void *rx_dma_buf; 333 void *rx_pkt; 334 bool started; 335 bool reg_access_ok; 336 }; 337 338 /* 339 * TX and RX descriptors are 16 bytes. This causes problems with the cache 340 * maintenance on CPUs where the cache-line size exceeds the size of these 341 * descriptors. What will happen is that when the driver receives a packet 342 * it will be immediately requeued for the hardware to reuse. The CPU will 343 * therefore need to flush the cache-line containing the descriptor, which 344 * will cause all other descriptors in the same cache-line to be flushed 345 * along with it. If one of those descriptors had been written to by the 346 * device those changes (and the associated packet) will be lost. 347 * 348 * To work around this, we make use of non-cached memory if available. 
If 349 * descriptors are mapped uncached there's no need to manually flush them 350 * or invalidate them. 351 * 352 * Note that this only applies to descriptors. The packet data buffers do 353 * not have the same constraints since they are 1536 bytes large, so they 354 * are unlikely to share cache-lines. 355 */ 356 static void *eqos_alloc_descs(unsigned int num) 357 { 358 #ifdef CONFIG_SYS_NONCACHED_MEMORY 359 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 360 EQOS_DESCRIPTOR_ALIGN); 361 #else 362 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 363 #endif 364 } 365 366 static void eqos_free_descs(void *descs) 367 { 368 #ifdef CONFIG_SYS_NONCACHED_MEMORY 369 /* FIXME: noncached_alloc() has no opposite */ 370 #else 371 free(descs); 372 #endif 373 } 374 375 static void eqos_inval_desc_tegra186(void *desc) 376 { 377 #ifndef CONFIG_SYS_NONCACHED_MEMORY 378 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 379 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 380 ARCH_DMA_MINALIGN); 381 382 invalidate_dcache_range(start, end); 383 #endif 384 } 385 386 static void eqos_inval_desc_generic(void *desc) 387 { 388 #ifndef CONFIG_SYS_NONCACHED_MEMORY 389 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 390 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 391 ARCH_DMA_MINALIGN); 392 393 invalidate_dcache_range(start, end); 394 #endif 395 } 396 397 static void eqos_flush_desc_tegra186(void *desc) 398 { 399 #ifndef CONFIG_SYS_NONCACHED_MEMORY 400 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 401 #endif 402 } 403 404 static void eqos_flush_desc_generic(void *desc) 405 { 406 #ifndef CONFIG_SYS_NONCACHED_MEMORY 407 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 408 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 409 ARCH_DMA_MINALIGN); 410 411 flush_dcache_range(start, end); 412 #endif 413 } 414 415 static void 
eqos_inval_buffer_tegra186(void *buf, size_t size) 416 { 417 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 418 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 419 420 invalidate_dcache_range(start, end); 421 } 422 423 static void eqos_inval_buffer_generic(void *buf, size_t size) 424 { 425 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 426 unsigned long end = roundup((unsigned long)buf + size, 427 ARCH_DMA_MINALIGN); 428 429 invalidate_dcache_range(start, end); 430 } 431 432 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 433 { 434 flush_cache((unsigned long)buf, size); 435 } 436 437 static void eqos_flush_buffer_generic(void *buf, size_t size) 438 { 439 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 440 unsigned long end = roundup((unsigned long)buf + size, 441 ARCH_DMA_MINALIGN); 442 443 flush_dcache_range(start, end); 444 } 445 446 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 447 { 448 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 449 EQOS_MAC_MDIO_ADDRESS_GB, false, 450 1000000, true); 451 } 452 453 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 454 int mdio_reg) 455 { 456 struct eqos_priv *eqos = bus->priv; 457 u32 val; 458 int ret; 459 460 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 461 mdio_reg); 462 463 ret = eqos_mdio_wait_idle(eqos); 464 if (ret) { 465 pr_err("MDIO not idle at entry"); 466 return ret; 467 } 468 469 val = readl(&eqos->mac_regs->mdio_address); 470 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 471 EQOS_MAC_MDIO_ADDRESS_C45E; 472 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 473 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 474 (eqos->config->config_mac_mdio << 475 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 476 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 477 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 478 EQOS_MAC_MDIO_ADDRESS_GB; 479 writel(val, &eqos->mac_regs->mdio_address); 480 481 
udelay(eqos->config->mdio_wait); 482 483 ret = eqos_mdio_wait_idle(eqos); 484 if (ret) { 485 pr_err("MDIO read didn't complete"); 486 return ret; 487 } 488 489 val = readl(&eqos->mac_regs->mdio_data); 490 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 491 492 debug("%s: val=%x\n", __func__, val); 493 494 return val; 495 } 496 497 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 498 int mdio_reg, u16 mdio_val) 499 { 500 struct eqos_priv *eqos = bus->priv; 501 u32 val; 502 int ret; 503 504 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 505 mdio_addr, mdio_reg, mdio_val); 506 507 ret = eqos_mdio_wait_idle(eqos); 508 if (ret) { 509 pr_err("MDIO not idle at entry"); 510 return ret; 511 } 512 513 writel(mdio_val, &eqos->mac_regs->mdio_data); 514 515 val = readl(&eqos->mac_regs->mdio_address); 516 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 517 EQOS_MAC_MDIO_ADDRESS_C45E; 518 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 519 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 520 (eqos->config->config_mac_mdio << 521 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 522 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 523 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 524 EQOS_MAC_MDIO_ADDRESS_GB; 525 writel(val, &eqos->mac_regs->mdio_address); 526 527 udelay(eqos->config->mdio_wait); 528 529 ret = eqos_mdio_wait_idle(eqos); 530 if (ret) { 531 pr_err("MDIO read didn't complete"); 532 return ret; 533 } 534 535 return 0; 536 } 537 538 static int eqos_start_clks_tegra186(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_slave_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 549 goto err; 550 } 551 552 ret = clk_enable(&eqos->clk_master_bus); 553 if (ret < 0) { 554 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 555 goto err_disable_clk_slave_bus; 556 } 557 558 ret = clk_enable(&eqos->clk_rx); 559 if 
(ret < 0) { 560 pr_err("clk_enable(clk_rx) failed: %d", ret); 561 goto err_disable_clk_master_bus; 562 } 563 564 ret = clk_enable(&eqos->clk_ptp_ref); 565 if (ret < 0) { 566 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 567 goto err_disable_clk_rx; 568 } 569 570 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 571 if (ret < 0) { 572 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 573 goto err_disable_clk_ptp_ref; 574 } 575 576 ret = clk_enable(&eqos->clk_tx); 577 if (ret < 0) { 578 pr_err("clk_enable(clk_tx) failed: %d", ret); 579 goto err_disable_clk_ptp_ref; 580 } 581 #endif 582 583 debug("%s: OK\n", __func__); 584 return 0; 585 586 #ifdef CONFIG_CLK 587 err_disable_clk_ptp_ref: 588 clk_disable(&eqos->clk_ptp_ref); 589 err_disable_clk_rx: 590 clk_disable(&eqos->clk_rx); 591 err_disable_clk_master_bus: 592 clk_disable(&eqos->clk_master_bus); 593 err_disable_clk_slave_bus: 594 clk_disable(&eqos->clk_slave_bus); 595 err: 596 debug("%s: FAILED: %d\n", __func__, ret); 597 return ret; 598 #endif 599 } 600 601 static int eqos_start_clks_stm32(struct udevice *dev) 602 { 603 #ifdef CONFIG_CLK 604 struct eqos_priv *eqos = dev_get_priv(dev); 605 int ret; 606 607 debug("%s(dev=%p):\n", __func__, dev); 608 609 ret = clk_enable(&eqos->clk_master_bus); 610 if (ret < 0) { 611 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 612 goto err; 613 } 614 615 if (clk_valid(&eqos->clk_rx)) { 616 ret = clk_enable(&eqos->clk_rx); 617 if (ret < 0) { 618 pr_err("clk_enable(clk_rx) failed: %d", ret); 619 goto err_disable_clk_master_bus; 620 } 621 } 622 623 if (clk_valid(&eqos->clk_tx)) { 624 ret = clk_enable(&eqos->clk_tx); 625 if (ret < 0) { 626 pr_err("clk_enable(clk_tx) failed: %d", ret); 627 goto err_disable_clk_rx; 628 } 629 } 630 631 if (clk_valid(&eqos->clk_ck)) { 632 ret = clk_enable(&eqos->clk_ck); 633 if (ret < 0) { 634 pr_err("clk_enable(clk_ck) failed: %d", ret); 635 goto err_disable_clk_tx; 636 } 637 } 638 #endif 639 640 debug("%s: OK\n", __func__); 641 
return 0; 642 643 #ifdef CONFIG_CLK 644 err_disable_clk_tx: 645 if (clk_valid(&eqos->clk_tx)) 646 clk_disable(&eqos->clk_tx); 647 err_disable_clk_rx: 648 if (clk_valid(&eqos->clk_rx)) 649 clk_disable(&eqos->clk_rx); 650 err_disable_clk_master_bus: 651 clk_disable(&eqos->clk_master_bus); 652 err: 653 debug("%s: FAILED: %d\n", __func__, ret); 654 return ret; 655 #endif 656 } 657 658 static int eqos_start_clks_imx(struct udevice *dev) 659 { 660 return 0; 661 } 662 663 static void eqos_stop_clks_tegra186(struct udevice *dev) 664 { 665 #ifdef CONFIG_CLK 666 struct eqos_priv *eqos = dev_get_priv(dev); 667 668 debug("%s(dev=%p):\n", __func__, dev); 669 670 clk_disable(&eqos->clk_tx); 671 clk_disable(&eqos->clk_ptp_ref); 672 clk_disable(&eqos->clk_rx); 673 clk_disable(&eqos->clk_master_bus); 674 clk_disable(&eqos->clk_slave_bus); 675 #endif 676 677 debug("%s: OK\n", __func__); 678 } 679 680 static void eqos_stop_clks_stm32(struct udevice *dev) 681 { 682 #ifdef CONFIG_CLK 683 struct eqos_priv *eqos = dev_get_priv(dev); 684 685 debug("%s(dev=%p):\n", __func__, dev); 686 687 if (clk_valid(&eqos->clk_tx)) 688 clk_disable(&eqos->clk_tx); 689 if (clk_valid(&eqos->clk_rx)) 690 clk_disable(&eqos->clk_rx); 691 clk_disable(&eqos->clk_master_bus); 692 if (clk_valid(&eqos->clk_ck)) 693 clk_disable(&eqos->clk_ck); 694 #endif 695 696 debug("%s: OK\n", __func__); 697 } 698 699 static void eqos_stop_clks_imx(struct udevice *dev) 700 { 701 /* empty */ 702 } 703 704 static int eqos_start_resets_tegra186(struct udevice *dev) 705 { 706 struct eqos_priv *eqos = dev_get_priv(dev); 707 int ret; 708 709 debug("%s(dev=%p):\n", __func__, dev); 710 711 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 712 if (ret < 0) { 713 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 714 return ret; 715 } 716 717 udelay(2); 718 719 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 720 if (ret < 0) { 721 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 722 return ret; 723 } 724 
725 ret = reset_assert(&eqos->reset_ctl); 726 if (ret < 0) { 727 pr_err("reset_assert() failed: %d", ret); 728 return ret; 729 } 730 731 udelay(2); 732 733 ret = reset_deassert(&eqos->reset_ctl); 734 if (ret < 0) { 735 pr_err("reset_deassert() failed: %d", ret); 736 return ret; 737 } 738 739 debug("%s: OK\n", __func__); 740 return 0; 741 } 742 743 static int eqos_start_resets_stm32(struct udevice *dev) 744 { 745 struct eqos_priv *eqos = dev_get_priv(dev); 746 int ret; 747 748 debug("%s(dev=%p):\n", __func__, dev); 749 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 750 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 751 if (ret < 0) { 752 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 753 ret); 754 return ret; 755 } 756 757 udelay(eqos->reset_delays[0]); 758 759 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 760 if (ret < 0) { 761 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 762 ret); 763 return ret; 764 } 765 766 udelay(eqos->reset_delays[1]); 767 768 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 769 if (ret < 0) { 770 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 771 ret); 772 return ret; 773 } 774 775 udelay(eqos->reset_delays[2]); 776 } 777 debug("%s: OK\n", __func__); 778 779 return 0; 780 } 781 782 static int eqos_start_resets_imx(struct udevice *dev) 783 { 784 return 0; 785 } 786 787 static int eqos_stop_resets_tegra186(struct udevice *dev) 788 { 789 struct eqos_priv *eqos = dev_get_priv(dev); 790 791 reset_assert(&eqos->reset_ctl); 792 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 793 794 return 0; 795 } 796 797 static int eqos_stop_resets_stm32(struct udevice *dev) 798 { 799 struct eqos_priv *eqos = dev_get_priv(dev); 800 int ret; 801 802 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 803 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 804 if (ret < 0) { 805 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 806 ret); 807 return ret; 808 } 809 } 810 811 return 0; 812 } 813 814 static int 
eqos_stop_resets_imx(struct udevice *dev) 815 { 816 return 0; 817 } 818 819 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 820 { 821 struct eqos_priv *eqos = dev_get_priv(dev); 822 int ret; 823 824 debug("%s(dev=%p):\n", __func__, dev); 825 826 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 827 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 828 829 udelay(1); 830 831 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 832 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 833 834 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 835 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 836 if (ret) { 837 pr_err("calibrate didn't start"); 838 goto failed; 839 } 840 841 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 842 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 843 if (ret) { 844 pr_err("calibrate didn't finish"); 845 goto failed; 846 } 847 848 ret = 0; 849 850 failed: 851 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 852 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 853 854 debug("%s: returns %d\n", __func__, ret); 855 856 return ret; 857 } 858 859 static int eqos_disable_calibration_tegra186(struct udevice *dev) 860 { 861 struct eqos_priv *eqos = dev_get_priv(dev); 862 863 debug("%s(dev=%p):\n", __func__, dev); 864 865 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 866 EQOS_AUTO_CAL_CONFIG_ENABLE); 867 868 return 0; 869 } 870 871 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 872 { 873 #ifdef CONFIG_CLK 874 struct eqos_priv *eqos = dev_get_priv(dev); 875 876 return clk_get_rate(&eqos->clk_slave_bus); 877 #else 878 return 0; 879 #endif 880 } 881 882 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 883 { 884 #ifdef CONFIG_CLK 885 struct eqos_priv *eqos = dev_get_priv(dev); 886 887 return clk_get_rate(&eqos->clk_master_bus); 888 #else 889 return 0; 890 #endif 891 } 892 893 __weak u32 imx_get_eqos_csr_clk(void) 894 { 895 return 100 * 1000000; 896 } 897 __weak int 
imx_eqos_txclk_set_rate(unsigned long rate) 898 { 899 return 0; 900 } 901 902 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 903 { 904 return imx_get_eqos_csr_clk(); 905 } 906 907 static int eqos_calibrate_pads_stm32(struct udevice *dev) 908 { 909 return 0; 910 } 911 912 static int eqos_calibrate_pads_imx(struct udevice *dev) 913 { 914 return 0; 915 } 916 917 static int eqos_disable_calibration_stm32(struct udevice *dev) 918 { 919 return 0; 920 } 921 922 static int eqos_disable_calibration_imx(struct udevice *dev) 923 { 924 return 0; 925 } 926 927 static int eqos_set_full_duplex(struct udevice *dev) 928 { 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 931 debug("%s(dev=%p):\n", __func__, dev); 932 933 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 934 935 return 0; 936 } 937 938 static int eqos_set_half_duplex(struct udevice *dev) 939 { 940 struct eqos_priv *eqos = dev_get_priv(dev); 941 942 debug("%s(dev=%p):\n", __func__, dev); 943 944 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 945 946 /* WAR: Flush TX queue when switching to half-duplex */ 947 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 948 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 949 950 return 0; 951 } 952 953 static int eqos_set_gmii_speed(struct udevice *dev) 954 { 955 struct eqos_priv *eqos = dev_get_priv(dev); 956 957 debug("%s(dev=%p):\n", __func__, dev); 958 959 clrbits_le32(&eqos->mac_regs->configuration, 960 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 961 962 return 0; 963 } 964 965 static int eqos_set_mii_speed_100(struct udevice *dev) 966 { 967 struct eqos_priv *eqos = dev_get_priv(dev); 968 969 debug("%s(dev=%p):\n", __func__, dev); 970 971 setbits_le32(&eqos->mac_regs->configuration, 972 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 973 974 return 0; 975 } 976 977 static int eqos_set_mii_speed_10(struct udevice *dev) 978 { 979 struct eqos_priv *eqos = dev_get_priv(dev); 980 981 debug("%s(dev=%p):\n", 
__func__, dev); 982 983 clrsetbits_le32(&eqos->mac_regs->configuration, 984 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 985 986 return 0; 987 } 988 989 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 990 { 991 #ifdef CONFIG_CLK 992 struct eqos_priv *eqos = dev_get_priv(dev); 993 ulong rate; 994 int ret; 995 996 debug("%s(dev=%p):\n", __func__, dev); 997 998 switch (eqos->phy->speed) { 999 case SPEED_1000: 1000 rate = 125 * 1000 * 1000; 1001 break; 1002 case SPEED_100: 1003 rate = 25 * 1000 * 1000; 1004 break; 1005 case SPEED_10: 1006 rate = 2.5 * 1000 * 1000; 1007 break; 1008 default: 1009 pr_err("invalid speed %d", eqos->phy->speed); 1010 return -EINVAL; 1011 } 1012 1013 ret = clk_set_rate(&eqos->clk_tx, rate); 1014 if (ret < 0) { 1015 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 1016 return ret; 1017 } 1018 #endif 1019 1020 return 0; 1021 } 1022 1023 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 1024 { 1025 return 0; 1026 } 1027 1028 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 1029 { 1030 struct eqos_priv *eqos = dev_get_priv(dev); 1031 ulong rate; 1032 int ret; 1033 1034 debug("%s(dev=%p):\n", __func__, dev); 1035 1036 switch (eqos->phy->speed) { 1037 case SPEED_1000: 1038 rate = 125 * 1000 * 1000; 1039 break; 1040 case SPEED_100: 1041 rate = 25 * 1000 * 1000; 1042 break; 1043 case SPEED_10: 1044 rate = 2.5 * 1000 * 1000; 1045 break; 1046 default: 1047 pr_err("invalid speed %d", eqos->phy->speed); 1048 return -EINVAL; 1049 } 1050 1051 ret = imx_eqos_txclk_set_rate(rate); 1052 if (ret < 0) { 1053 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 1054 return ret; 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int eqos_adjust_link(struct udevice *dev) 1061 { 1062 struct eqos_priv *eqos = dev_get_priv(dev); 1063 int ret; 1064 bool en_calibration; 1065 1066 debug("%s(dev=%p):\n", __func__, dev); 1067 1068 if (eqos->phy->duplex) 1069 ret = eqos_set_full_duplex(dev); 1070 else 1071 ret = 
eqos_set_half_duplex(dev); 1072 if (ret < 0) { 1073 pr_err("eqos_set_*_duplex() failed: %d", ret); 1074 return ret; 1075 } 1076 1077 switch (eqos->phy->speed) { 1078 case SPEED_1000: 1079 en_calibration = true; 1080 ret = eqos_set_gmii_speed(dev); 1081 break; 1082 case SPEED_100: 1083 en_calibration = true; 1084 ret = eqos_set_mii_speed_100(dev); 1085 break; 1086 case SPEED_10: 1087 en_calibration = false; 1088 ret = eqos_set_mii_speed_10(dev); 1089 break; 1090 default: 1091 pr_err("invalid speed %d", eqos->phy->speed); 1092 return -EINVAL; 1093 } 1094 if (ret < 0) { 1095 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1096 return ret; 1097 } 1098 1099 if (en_calibration) { 1100 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1101 if (ret < 0) { 1102 pr_err("eqos_calibrate_pads() failed: %d", 1103 ret); 1104 return ret; 1105 } 1106 } else { 1107 ret = eqos->config->ops->eqos_disable_calibration(dev); 1108 if (ret < 0) { 1109 pr_err("eqos_disable_calibration() failed: %d", 1110 ret); 1111 return ret; 1112 } 1113 } 1114 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1115 if (ret < 0) { 1116 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1117 return ret; 1118 } 1119 1120 return 0; 1121 } 1122 1123 static int eqos_write_hwaddr(struct udevice *dev) 1124 { 1125 struct eth_pdata *plat = dev_get_platdata(dev); 1126 struct eqos_priv *eqos = dev_get_priv(dev); 1127 uint32_t val; 1128 1129 /* 1130 * This function may be called before start() or after stop(). At that 1131 * time, on at least some configurations of the EQoS HW, all clocks to 1132 * the EQoS HW block will be stopped, and a reset signal applied. If 1133 * any register access is attempted in this state, bus timeouts or CPU 1134 * hangs may occur. This check prevents that. 1135 * 1136 * A simple solution to this problem would be to not implement 1137 * write_hwaddr(), since start() always writes the MAC address into HW 1138 * anyway. 
However, it is desirable to implement write_hwaddr() to 1139 * support the case of SW that runs subsequent to U-Boot which expects 1140 * the MAC address to already be programmed into the EQoS registers, 1141 * which must happen irrespective of whether the U-Boot user (or 1142 * scripts) actually made use of the EQoS device, and hence 1143 * irrespective of whether start() was ever called. 1144 * 1145 * Note that this requirement by subsequent SW is not valid for 1146 * Tegra186, and is likely not valid for any non-PCI instantiation of 1147 * the EQoS HW block. This function is implemented solely as 1148 * future-proofing with the expectation the driver will eventually be 1149 * ported to some system where the expectation above is true. 1150 */ 1151 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1152 return 0; 1153 1154 /* Update the MAC address */ 1155 val = (plat->enetaddr[5] << 8) | 1156 (plat->enetaddr[4]); 1157 writel(val, &eqos->mac_regs->address0_high); 1158 val = (plat->enetaddr[3] << 24) | 1159 (plat->enetaddr[2] << 16) | 1160 (plat->enetaddr[1] << 8) | 1161 (plat->enetaddr[0]); 1162 writel(val, &eqos->mac_regs->address0_low); 1163 1164 return 0; 1165 } 1166 1167 static int eqos_read_rom_hwaddr(struct udevice *dev) 1168 { 1169 struct eth_pdata *pdata = dev_get_platdata(dev); 1170 1171 #ifdef CONFIG_ARCH_IMX8M 1172 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1173 #endif 1174 return !is_valid_ethaddr(pdata->enetaddr); 1175 } 1176 1177 static int eqos_init(struct udevice *dev) 1178 { 1179 struct eqos_priv *eqos = dev_get_priv(dev); 1180 int ret; 1181 ulong rate; 1182 u32 val; 1183 1184 debug("%s(dev=%p):\n", __func__, dev); 1185 1186 if (eqos->config->ops->eqos_start_clks) { 1187 ret = eqos->config->ops->eqos_start_clks(dev); 1188 if (ret < 0) { 1189 pr_err("eqos_start_clks() failed: %d", ret); 1190 goto err; 1191 } 1192 } 1193 1194 ret = eqos->config->ops->eqos_start_resets(dev); 1195 if (ret < 0) { 1196 
pr_err("eqos_start_resets() failed: %d", ret); 1197 goto err_stop_clks; 1198 } 1199 1200 udelay(10); 1201 1202 eqos->reg_access_ok = true; 1203 1204 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1205 EQOS_DMA_MODE_SWR, false, 1206 eqos->config->swr_wait, false); 1207 if (ret) { 1208 pr_err("EQOS_DMA_MODE_SWR stuck"); 1209 goto err_stop_resets; 1210 } 1211 1212 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1213 if (ret < 0) { 1214 pr_err("eqos_calibrate_pads() failed: %d", ret); 1215 goto err_stop_resets; 1216 } 1217 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1218 1219 val = (rate / 1000000) - 1; 1220 writel(val, &eqos->mac_regs->us_tic_counter); 1221 1222 /* 1223 * if PHY was already connected and configured, 1224 * don't need to reconnect/reconfigure again 1225 */ 1226 if (!eqos->phy) { 1227 int addr = -1; 1228 #ifdef CONFIG_DM_ETH_PHY 1229 addr = eth_phy_get_addr(dev); 1230 #endif 1231 #ifdef DWC_NET_PHYADDR 1232 addr = DWC_NET_PHYADDR; 1233 #endif 1234 eqos->phy = phy_connect(eqos->mii, addr, dev, 1235 eqos->config->ops->eqos_get_interface(dev)); 1236 if (!eqos->phy) { 1237 pr_err("phy_connect() failed"); 1238 goto err_stop_resets; 1239 } 1240 1241 if (eqos->max_speed) { 1242 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1243 if (ret) { 1244 pr_err("phy_set_supported() failed: %d", ret); 1245 goto err_shutdown_phy; 1246 } 1247 } 1248 1249 ret = phy_config(eqos->phy); 1250 if (ret < 0) { 1251 pr_err("phy_config() failed: %d", ret); 1252 goto err_shutdown_phy; 1253 } 1254 } 1255 1256 ret = phy_startup(eqos->phy); 1257 if (ret < 0) { 1258 pr_err("phy_startup() failed: %d", ret); 1259 goto err_shutdown_phy; 1260 } 1261 1262 if (!eqos->phy->link) { 1263 pr_err("No link"); 1264 goto err_shutdown_phy; 1265 } 1266 1267 ret = eqos_adjust_link(dev); 1268 if (ret < 0) { 1269 pr_err("eqos_adjust_link() failed: %d", ret); 1270 goto err_shutdown_phy; 1271 } 1272 1273 debug("%s: OK\n", __func__); 1274 return 0; 1275 1276 err_shutdown_phy: 1277 
phy_shutdown(eqos->phy); 1278 err_stop_resets: 1279 eqos->config->ops->eqos_stop_resets(dev); 1280 err_stop_clks: 1281 if (eqos->config->ops->eqos_stop_clks) 1282 eqos->config->ops->eqos_stop_clks(dev); 1283 err: 1284 pr_err("FAILED: %d", ret); 1285 return ret; 1286 } 1287 1288 static void eqos_enable(struct udevice *dev) 1289 { 1290 struct eqos_priv *eqos = dev_get_priv(dev); 1291 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1292 ulong last_rx_desc; 1293 int i; 1294 1295 eqos->tx_desc_idx = 0; 1296 eqos->rx_desc_idx = 0; 1297 1298 /* Configure MTL */ 1299 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1300 1301 /* Enable Store and Forward mode for TX */ 1302 /* Program Tx operating mode */ 1303 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1304 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1305 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1306 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1307 1308 /* Transmit Queue weight */ 1309 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1310 1311 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1312 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1313 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1314 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1315 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1316 1317 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1318 val = readl(&eqos->mac_regs->hw_feature1); 1319 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1320 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1321 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1322 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1323 1324 /* 1325 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1326 * r/tqs is encoded as (n / 256) - 1. 
1327 */ 1328 tqs = (128 << tx_fifo_sz) / 256 - 1; 1329 rqs = (128 << rx_fifo_sz) / 256 - 1; 1330 1331 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1332 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1333 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1334 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1335 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1336 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1337 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1338 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1339 1340 /* Flow control used only if each channel gets 4KB or more FIFO */ 1341 if (rqs >= ((4096 / 256) - 1)) { 1342 u32 rfd, rfa; 1343 1344 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1345 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1346 1347 /* 1348 * Set Threshold for Activating Flow Contol space for min 2 1349 * frames ie, (1500 * 1) = 1500 bytes. 1350 * 1351 * Set Threshold for Deactivating Flow Contol for space of 1352 * min 1 frame (frame size 1500bytes) in receive fifo 1353 */ 1354 if (rqs == ((4096 / 256) - 1)) { 1355 /* 1356 * This violates the above formula because of FIFO size 1357 * limit therefore overflow may occur inspite of this. 
1358 */ 1359 rfd = 0x3; /* Full-3K */ 1360 rfa = 0x1; /* Full-1.5K */ 1361 } else if (rqs == ((8192 / 256) - 1)) { 1362 rfd = 0x6; /* Full-4K */ 1363 rfa = 0xa; /* Full-6K */ 1364 } else if (rqs == ((16384 / 256) - 1)) { 1365 rfd = 0x6; /* Full-4K */ 1366 rfa = 0x12; /* Full-10K */ 1367 } else { 1368 rfd = 0x6; /* Full-4K */ 1369 rfa = 0x1E; /* Full-16K */ 1370 } 1371 1372 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1373 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1374 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1375 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1376 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1377 (rfd << 1378 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1379 (rfa << 1380 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1381 } 1382 1383 /* Configure MAC */ 1384 1385 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1386 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1387 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1388 eqos->config->config_mac << 1389 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1390 1391 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1392 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1393 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1394 0x2 << 1395 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1396 1397 /* Multicast and Broadcast Queue Enable */ 1398 setbits_le32(&eqos->mac_regs->unused_0a4, 1399 0x00100000); 1400 /* enable promise mode */ 1401 setbits_le32(&eqos->mac_regs->unused_004[1], 1402 0x1); 1403 1404 /* Set TX flow control parameters */ 1405 /* Set Pause Time */ 1406 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1407 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1408 /* Assign priority for TX flow control */ 1409 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1410 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1411 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1412 /* Assign priority for RX flow control */ 1413 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1414 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1415 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1416 /* Enable flow control */ 1417 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1418 
EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1419 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1420 EQOS_MAC_RX_FLOW_CTRL_RFE); 1421 1422 clrsetbits_le32(&eqos->mac_regs->configuration, 1423 EQOS_MAC_CONFIGURATION_GPSLCE | 1424 EQOS_MAC_CONFIGURATION_WD | 1425 EQOS_MAC_CONFIGURATION_JD | 1426 EQOS_MAC_CONFIGURATION_JE, 1427 EQOS_MAC_CONFIGURATION_CST | 1428 EQOS_MAC_CONFIGURATION_ACS); 1429 1430 eqos_write_hwaddr(dev); 1431 1432 /* Configure DMA */ 1433 1434 /* Enable OSP mode */ 1435 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1436 EQOS_DMA_CH0_TX_CONTROL_OSP); 1437 1438 /* RX buffer size. Must be a multiple of bus width */ 1439 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1440 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1441 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1442 EQOS_MAX_PACKET_SIZE << 1443 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1444 1445 setbits_le32(&eqos->dma_regs->ch0_control, 1446 EQOS_DMA_CH0_CONTROL_PBLX8); 1447 1448 /* 1449 * Burst length must be < 1/2 FIFO size. 1450 * FIFO size in tqs is encoded as (n / 256) - 1. 1451 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1452 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 
1453 */ 1454 pbl = tqs + 1; 1455 if (pbl > 32) 1456 pbl = 32; 1457 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1458 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1459 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1460 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1461 1462 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1463 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1464 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1465 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1466 1467 /* DMA performance configuration */ 1468 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1469 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1470 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1471 writel(val, &eqos->dma_regs->sysbus_mode); 1472 1473 /* Set up descriptors */ 1474 1475 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1476 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1477 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1478 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1479 (i * EQOS_MAX_PACKET_SIZE)); 1480 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1481 mb(); 1482 eqos->config->ops->eqos_flush_desc(rx_desc); 1483 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1484 (i * EQOS_MAX_PACKET_SIZE), 1485 EQOS_MAX_PACKET_SIZE); 1486 } 1487 1488 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1489 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1490 writel(EQOS_DESCRIPTORS_TX - 1, 1491 &eqos->dma_regs->ch0_txdesc_ring_length); 1492 1493 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1494 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1495 writel(EQOS_DESCRIPTORS_RX - 1, 1496 &eqos->dma_regs->ch0_rxdesc_ring_length); 1497 1498 /* Enable everything */ 1499 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1500 EQOS_DMA_CH0_TX_CONTROL_ST); 1501 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1502 EQOS_DMA_CH0_RX_CONTROL_SR); 1503 setbits_le32(&eqos->mac_regs->configuration, 1504 EQOS_MAC_CONFIGURATION_TE | 
EQOS_MAC_CONFIGURATION_RE); 1505 1506 /* TX tail pointer not written until we need to TX a packet */ 1507 /* 1508 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1509 * first descriptor, implying all descriptors were available. However, 1510 * that's not distinguishable from none of the descriptors being 1511 * available. 1512 */ 1513 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1514 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1515 1516 eqos->started = true; 1517 } 1518 1519 static int eqos_start(struct udevice *dev) 1520 { 1521 int ret; 1522 1523 ret = eqos_init(dev); 1524 if (ret) 1525 return ret; 1526 1527 eqos_enable(dev); 1528 1529 return 0; 1530 } 1531 1532 static void eqos_stop(struct udevice *dev) 1533 { 1534 struct eqos_priv *eqos = dev_get_priv(dev); 1535 int i; 1536 1537 debug("%s(dev=%p):\n", __func__, dev); 1538 1539 if (!eqos->started) 1540 return; 1541 eqos->started = false; 1542 eqos->reg_access_ok = false; 1543 1544 /* Disable TX DMA */ 1545 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1546 EQOS_DMA_CH0_TX_CONTROL_ST); 1547 1548 /* Wait for TX all packets to drain out of MTL */ 1549 for (i = 0; i < 1000000; i++) { 1550 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1551 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1552 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1553 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1554 if ((trcsts != 1) && (!txqsts)) 1555 break; 1556 } 1557 1558 /* Turn off MAC TX and RX */ 1559 clrbits_le32(&eqos->mac_regs->configuration, 1560 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1561 1562 /* Wait for all RX packets to drain out of MTL */ 1563 for (i = 0; i < 1000000; i++) { 1564 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1565 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1566 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1567 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1568 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1569 if ((!prxq) && (!rxqsts)) 
1570 break; 1571 } 1572 1573 /* Turn off RX DMA */ 1574 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1575 EQOS_DMA_CH0_RX_CONTROL_SR); 1576 1577 if (eqos->phy) { 1578 phy_shutdown(eqos->phy); 1579 } 1580 eqos->config->ops->eqos_stop_resets(dev); 1581 if (eqos->config->ops->eqos_stop_clks) 1582 eqos->config->ops->eqos_stop_clks(dev); 1583 1584 debug("%s: OK\n", __func__); 1585 } 1586 1587 static int eqos_send(struct udevice *dev, void *packet, int length) 1588 { 1589 struct eqos_priv *eqos = dev_get_priv(dev); 1590 struct eqos_desc *tx_desc; 1591 int i; 1592 1593 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1594 length); 1595 1596 memcpy(eqos->tx_dma_buf, packet, length); 1597 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1598 1599 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1600 eqos->tx_desc_idx++; 1601 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1602 1603 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1604 tx_desc->des1 = 0; 1605 tx_desc->des2 = length; 1606 /* 1607 * Make sure that if HW sees the _OWN write below, it will see all the 1608 * writes to the rest of the descriptor too. 
1609 */ 1610 mb(); 1611 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1612 eqos->config->ops->eqos_flush_desc(tx_desc); 1613 1614 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1615 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1616 1617 for (i = 0; i < 1000000; i++) { 1618 eqos->config->ops->eqos_inval_desc(tx_desc); 1619 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1620 return 0; 1621 udelay(1); 1622 } 1623 1624 debug("%s: TX timeout\n", __func__); 1625 1626 return -ETIMEDOUT; 1627 } 1628 1629 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1630 { 1631 struct eqos_priv *eqos = dev_get_priv(dev); 1632 struct eqos_desc *rx_desc; 1633 int length; 1634 1635 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1636 1637 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1638 eqos->config->ops->eqos_inval_desc(rx_desc); 1639 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1640 debug("%s: RX packet not available\n", __func__); 1641 return -EAGAIN; 1642 } 1643 1644 *packetp = eqos->rx_dma_buf + 1645 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1646 length = rx_desc->des3 & 0x7fff; 1647 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1648 1649 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1650 1651 return length; 1652 } 1653 1654 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1655 { 1656 struct eqos_priv *eqos = dev_get_priv(dev); 1657 uchar *packet_expected; 1658 struct eqos_desc *rx_desc; 1659 1660 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1661 1662 packet_expected = eqos->rx_dma_buf + 1663 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1664 if (packet != packet_expected) { 1665 debug("%s: Unexpected packet (expected %p)\n", __func__, 1666 packet_expected); 1667 return -EINVAL; 1668 } 1669 1670 eqos->config->ops->eqos_inval_buffer(packet, length); 1671 1672 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1673 1674 rx_desc->des0 = 0; 1675 mb(); 1676 
eqos->config->ops->eqos_flush_desc(rx_desc); 1677 eqos->config->ops->eqos_inval_buffer(packet, length); 1678 rx_desc->des0 = (u32)(ulong)packet; 1679 rx_desc->des1 = 0; 1680 rx_desc->des2 = 0; 1681 /* 1682 * Make sure that if HW sees the _OWN write below, it will see all the 1683 * writes to the rest of the descriptor too. 1684 */ 1685 mb(); 1686 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1687 eqos->config->ops->eqos_flush_desc(rx_desc); 1688 1689 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1690 1691 eqos->rx_desc_idx++; 1692 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1693 1694 return 0; 1695 } 1696 1697 static int eqos_probe_resources_core(struct udevice *dev) 1698 { 1699 struct eqos_priv *eqos = dev_get_priv(dev); 1700 int ret; 1701 1702 debug("%s(dev=%p):\n", __func__, dev); 1703 1704 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1705 EQOS_DESCRIPTORS_RX); 1706 if (!eqos->descs) { 1707 debug("%s: eqos_alloc_descs() failed\n", __func__); 1708 ret = -ENOMEM; 1709 goto err; 1710 } 1711 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1712 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1713 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1714 eqos->rx_descs); 1715 1716 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1717 if (!eqos->tx_dma_buf) { 1718 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1719 ret = -ENOMEM; 1720 goto err_free_descs; 1721 } 1722 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1723 1724 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1725 if (!eqos->rx_dma_buf) { 1726 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1727 ret = -ENOMEM; 1728 goto err_free_tx_dma_buf; 1729 } 1730 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1731 1732 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1733 if (!eqos->rx_pkt) { 1734 debug("%s: malloc(rx_pkt) failed\n", __func__); 1735 ret = -ENOMEM; 1736 goto 
err_free_rx_dma_buf; 1737 } 1738 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1739 1740 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1741 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1742 1743 debug("%s: OK\n", __func__); 1744 return 0; 1745 1746 err_free_rx_dma_buf: 1747 free(eqos->rx_dma_buf); 1748 err_free_tx_dma_buf: 1749 free(eqos->tx_dma_buf); 1750 err_free_descs: 1751 eqos_free_descs(eqos->descs); 1752 err: 1753 1754 debug("%s: returns %d\n", __func__, ret); 1755 return ret; 1756 } 1757 1758 static int eqos_remove_resources_core(struct udevice *dev) 1759 { 1760 struct eqos_priv *eqos = dev_get_priv(dev); 1761 1762 debug("%s(dev=%p):\n", __func__, dev); 1763 1764 free(eqos->rx_pkt); 1765 free(eqos->rx_dma_buf); 1766 free(eqos->tx_dma_buf); 1767 eqos_free_descs(eqos->descs); 1768 1769 debug("%s: OK\n", __func__); 1770 return 0; 1771 } 1772 1773 static int eqos_probe_resources_tegra186(struct udevice *dev) 1774 { 1775 struct eqos_priv *eqos = dev_get_priv(dev); 1776 int ret; 1777 1778 debug("%s(dev=%p):\n", __func__, dev); 1779 1780 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1781 if (ret) { 1782 pr_err("reset_get_by_name(rst) failed: %d", ret); 1783 return ret; 1784 } 1785 1786 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1787 &eqos->phy_reset_gpio, 1788 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1789 if (ret) { 1790 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1791 goto err_free_reset_eqos; 1792 } 1793 1794 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1795 if (ret) { 1796 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1797 goto err_free_gpio_phy_reset; 1798 } 1799 1800 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1801 if (ret) { 1802 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1803 goto err_free_clk_slave_bus; 1804 } 1805 1806 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1807 if (ret) { 1808 pr_err("clk_get_by_name(rx) failed: %d", ret); 1809 goto 
err_free_clk_master_bus; 1810 } 1811 1812 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1813 if (ret) { 1814 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1815 goto err_free_clk_rx; 1816 return ret; 1817 } 1818 1819 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1820 if (ret) { 1821 pr_err("clk_get_by_name(tx) failed: %d", ret); 1822 goto err_free_clk_ptp_ref; 1823 } 1824 1825 debug("%s: OK\n", __func__); 1826 return 0; 1827 1828 err_free_clk_ptp_ref: 1829 clk_free(&eqos->clk_ptp_ref); 1830 err_free_clk_rx: 1831 clk_free(&eqos->clk_rx); 1832 err_free_clk_master_bus: 1833 clk_free(&eqos->clk_master_bus); 1834 err_free_clk_slave_bus: 1835 clk_free(&eqos->clk_slave_bus); 1836 err_free_gpio_phy_reset: 1837 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1838 err_free_reset_eqos: 1839 reset_free(&eqos->reset_ctl); 1840 1841 debug("%s: returns %d\n", __func__, ret); 1842 return ret; 1843 } 1844 1845 /* board-specific Ethernet Interface initializations. */ 1846 __weak int board_interface_eth_init(struct udevice *dev, 1847 phy_interface_t interface_type) 1848 { 1849 return 0; 1850 } 1851 1852 static int eqos_probe_resources_stm32(struct udevice *dev) 1853 { 1854 struct eqos_priv *eqos = dev_get_priv(dev); 1855 int ret; 1856 phy_interface_t interface; 1857 struct ofnode_phandle_args phandle_args; 1858 1859 debug("%s(dev=%p):\n", __func__, dev); 1860 1861 interface = eqos->config->ops->eqos_get_interface(dev); 1862 1863 if (interface == PHY_INTERFACE_MODE_NONE) { 1864 pr_err("Invalid PHY interface\n"); 1865 return -EINVAL; 1866 } 1867 1868 ret = board_interface_eth_init(dev, interface); 1869 if (ret) 1870 return -EINVAL; 1871 1872 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1873 1874 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1875 if (ret) { 1876 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1877 return ret; 1878 } 1879 1880 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1881 if (ret) 1882 
pr_warn("clk_get_by_name(rx) failed: %d", ret); 1883 1884 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1885 if (ret) 1886 pr_warn("clk_get_by_name(tx) failed: %d", ret); 1887 1888 /* Get ETH_CLK clocks (optional) */ 1889 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1890 if (ret) 1891 pr_warn("No phy clock provided %d", ret); 1892 1893 eqos->phyaddr = -1; 1894 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1895 &phandle_args); 1896 if (!ret) { 1897 /* search "reset-gpios" in phy node */ 1898 ret = gpio_request_by_name_nodev(phandle_args.node, 1899 "reset-gpios", 0, 1900 &eqos->phy_reset_gpio, 1901 GPIOD_IS_OUT | 1902 GPIOD_IS_OUT_ACTIVE); 1903 if (ret) 1904 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1905 ret); 1906 else 1907 eqos->reset_delays[1] = 2; 1908 1909 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1910 "reg", -1); 1911 } 1912 1913 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1914 int reset_flags = GPIOD_IS_OUT; 1915 1916 if (dev_read_bool(dev, "snps,reset-active-low")) 1917 reset_flags |= GPIOD_ACTIVE_LOW; 1918 1919 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1920 &eqos->phy_reset_gpio, reset_flags); 1921 if (ret == 0) 1922 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1923 eqos->reset_delays, 3); 1924 else 1925 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1926 ret); 1927 } 1928 1929 debug("%s: OK\n", __func__); 1930 return 0; 1931 } 1932 1933 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1934 { 1935 const char *phy_mode; 1936 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1937 1938 debug("%s(dev=%p):\n", __func__, dev); 1939 1940 phy_mode = dev_read_string(dev, "phy-mode"); 1941 if (phy_mode) 1942 interface = phy_get_interface_by_name(phy_mode); 1943 1944 return interface; 1945 } 1946 1947 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1948 { 1949 return PHY_INTERFACE_MODE_MII; 1950 } 1951 1952 static 
int eqos_probe_resources_imx(struct udevice *dev) 1953 { 1954 struct eqos_priv *eqos = dev_get_priv(dev); 1955 phy_interface_t interface; 1956 1957 debug("%s(dev=%p):\n", __func__, dev); 1958 1959 interface = eqos->config->ops->eqos_get_interface(dev); 1960 1961 if (interface == PHY_INTERFACE_MODE_NONE) { 1962 pr_err("Invalid PHY interface\n"); 1963 return -EINVAL; 1964 } 1965 1966 debug("%s: OK\n", __func__); 1967 return 0; 1968 } 1969 1970 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1971 { 1972 const char *phy_mode; 1973 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1974 1975 debug("%s(dev=%p):\n", __func__, dev); 1976 1977 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1978 NULL); 1979 if (phy_mode) 1980 interface = phy_get_interface_by_name(phy_mode); 1981 1982 return interface; 1983 } 1984 1985 static int eqos_remove_resources_tegra186(struct udevice *dev) 1986 { 1987 struct eqos_priv *eqos = dev_get_priv(dev); 1988 1989 debug("%s(dev=%p):\n", __func__, dev); 1990 1991 #ifdef CONFIG_CLK 1992 clk_free(&eqos->clk_tx); 1993 clk_free(&eqos->clk_ptp_ref); 1994 clk_free(&eqos->clk_rx); 1995 clk_free(&eqos->clk_slave_bus); 1996 clk_free(&eqos->clk_master_bus); 1997 #endif 1998 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1999 reset_free(&eqos->reset_ctl); 2000 2001 debug("%s: OK\n", __func__); 2002 return 0; 2003 } 2004 2005 static int eqos_remove_resources_stm32(struct udevice *dev) 2006 { 2007 #ifdef CONFIG_CLK 2008 struct eqos_priv *eqos = dev_get_priv(dev); 2009 2010 debug("%s(dev=%p):\n", __func__, dev); 2011 2012 if (clk_valid(&eqos->clk_tx)) 2013 clk_free(&eqos->clk_tx); 2014 if (clk_valid(&eqos->clk_rx)) 2015 clk_free(&eqos->clk_rx); 2016 clk_free(&eqos->clk_master_bus); 2017 if (clk_valid(&eqos->clk_ck)) 2018 clk_free(&eqos->clk_ck); 2019 #endif 2020 2021 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 2022 dm_gpio_free(dev, &eqos->phy_reset_gpio); 2023 2024 debug("%s: OK\n", __func__); 2025 return 0; 2026 } 2027 
2028 static int eqos_remove_resources_imx(struct udevice *dev) 2029 { 2030 return 0; 2031 } 2032 2033 static int eqos_probe(struct udevice *dev) 2034 { 2035 struct eqos_priv *eqos = dev_get_priv(dev); 2036 int ret; 2037 2038 debug("%s(dev=%p):\n", __func__, dev); 2039 2040 eqos->dev = dev; 2041 eqos->config = (void *)dev_get_driver_data(dev); 2042 2043 eqos->regs = dev_read_addr(dev); 2044 if (eqos->regs == FDT_ADDR_T_NONE) { 2045 pr_err("dev_read_addr() failed"); 2046 return -ENODEV; 2047 } 2048 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 2049 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 2050 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 2051 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 2052 2053 ret = eqos_probe_resources_core(dev); 2054 if (ret < 0) { 2055 pr_err("eqos_probe_resources_core() failed: %d", ret); 2056 return ret; 2057 } 2058 2059 ret = eqos->config->ops->eqos_probe_resources(dev); 2060 if (ret < 0) { 2061 pr_err("eqos_probe_resources() failed: %d", ret); 2062 goto err_remove_resources_core; 2063 } 2064 2065 #ifdef CONFIG_DM_ETH_PHY 2066 eqos->mii = eth_phy_get_mdio_bus(dev); 2067 #endif 2068 if (!eqos->mii) { 2069 eqos->mii = mdio_alloc(); 2070 if (!eqos->mii) { 2071 pr_err("mdio_alloc() failed"); 2072 ret = -ENOMEM; 2073 goto err_remove_resources_tegra; 2074 } 2075 eqos->mii->read = eqos_mdio_read; 2076 eqos->mii->write = eqos_mdio_write; 2077 eqos->mii->priv = eqos; 2078 strcpy(eqos->mii->name, dev->name); 2079 2080 ret = mdio_register(eqos->mii); 2081 if (ret < 0) { 2082 pr_err("mdio_register() failed: %d", ret); 2083 goto err_free_mdio; 2084 } 2085 } 2086 2087 #ifdef CONFIG_DM_ETH_PHY 2088 eth_phy_set_mdio_bus(dev, eqos->mii); 2089 #endif 2090 2091 debug("%s: OK\n", __func__); 2092 return 0; 2093 2094 err_free_mdio: 2095 mdio_free(eqos->mii); 2096 err_remove_resources_tegra: 2097 eqos->config->ops->eqos_remove_resources(dev); 2098 err_remove_resources_core: 2099 
eqos_remove_resources_core(dev); 2100 2101 debug("%s: returns %d\n", __func__, ret); 2102 return ret; 2103 } 2104 2105 static int eqos_remove(struct udevice *dev) 2106 { 2107 struct eqos_priv *eqos = dev_get_priv(dev); 2108 2109 debug("%s(dev=%p):\n", __func__, dev); 2110 2111 mdio_unregister(eqos->mii); 2112 mdio_free(eqos->mii); 2113 eqos->config->ops->eqos_remove_resources(dev); 2114 2115 eqos_probe_resources_core(dev); 2116 2117 debug("%s: OK\n", __func__); 2118 return 0; 2119 } 2120 2121 static const struct eth_ops eqos_ops = { 2122 .start = eqos_start, 2123 .stop = eqos_stop, 2124 .send = eqos_send, 2125 .recv = eqos_recv, 2126 .free_pkt = eqos_free_pkt, 2127 .write_hwaddr = eqos_write_hwaddr, 2128 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2129 }; 2130 2131 static struct eqos_ops eqos_tegra186_ops = { 2132 .eqos_inval_desc = eqos_inval_desc_tegra186, 2133 .eqos_flush_desc = eqos_flush_desc_tegra186, 2134 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2135 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2136 .eqos_probe_resources = eqos_probe_resources_tegra186, 2137 .eqos_remove_resources = eqos_remove_resources_tegra186, 2138 .eqos_stop_resets = eqos_stop_resets_tegra186, 2139 .eqos_start_resets = eqos_start_resets_tegra186, 2140 .eqos_stop_clks = eqos_stop_clks_tegra186, 2141 .eqos_start_clks = eqos_start_clks_tegra186, 2142 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2143 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2144 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2145 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2146 .eqos_get_interface = eqos_get_interface_tegra186 2147 }; 2148 2149 static const struct eqos_config eqos_tegra186_config = { 2150 .reg_access_always_ok = false, 2151 .mdio_wait = 10, 2152 .swr_wait = 10, 2153 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2154 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2155 .ops = &eqos_tegra186_ops 2156 }; 2157 2158 static struct eqos_ops 
eqos_stm32_ops = {
	/* STM32 uses the generic helpers shared by the non-Tegra configs */
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
	.eqos_get_interface = eqos_get_interface_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	/* i.MX also uses the generic desc/buffer helpers */
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx,
	.eqos_get_interface = eqos_get_interface_imx
};

/*
 * NOTE(review): unlike eqos_tegra186_config and eqos_stm32_config above,
 * this config is neither static nor const -- confirm whether board code
 * references it externally before tightening the linkage.
 */
struct eqos_config
eqos_imx_config = { 2204 .reg_access_always_ok = false, 2205 .mdio_wait = 10000, 2206 .swr_wait = 50, 2207 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2208 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2209 .ops = &eqos_imx_ops 2210 }; 2211 2212 static const struct udevice_id eqos_ids[] = { 2213 { 2214 .compatible = "nvidia,tegra186-eqos", 2215 .data = (ulong)&eqos_tegra186_config 2216 }, 2217 { 2218 .compatible = "snps,dwmac-4.20a", 2219 .data = (ulong)&eqos_stm32_config 2220 }, 2221 { 2222 .compatible = "fsl,imx-eqos", 2223 .data = (ulong)&eqos_imx_config 2224 }, 2225 2226 { } 2227 }; 2228 2229 U_BOOT_DRIVER(eth_eqos) = { 2230 .name = "eth_eqos", 2231 .id = UCLASS_ETH, 2232 .of_match = of_match_ptr(eqos_ids), 2233 .probe = eqos_probe, 2234 .remove = eqos_remove, 2235 .ops = &eqos_ops, 2236 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2237 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2238 }; 2239