1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 #ifdef CONFIG_ARCH_IMX8M 45 #include <asm/arch/clock.h> 46 #include <asm/mach-imx/sys_proto.h> 47 #endif 48 49 /* Core registers */ 50 51 #define EQOS_MAC_REGS_BASE 0x000 52 struct eqos_mac_regs { 53 uint32_t configuration; /* 0x000 */ 54 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 55 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 56 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 57 uint32_t rx_flow_ctrl; /* 0x090 */ 58 uint32_t unused_094; /* 0x094 */ 59 uint32_t txq_prty_map0; /* 0x098 */ 60 uint32_t unused_09c; /* 0x09c */ 61 uint32_t rxq_ctrl0; /* 0x0a0 */ 62 uint32_t unused_0a4; /* 0x0a4 */ 63 uint32_t rxq_ctrl2; /* 0x0a8 */ 64 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 65 uint32_t us_tic_counter; /* 0x0dc */ 66 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 67 uint32_t hw_feature0; /* 0x11c */ 68 uint32_t hw_feature1; /* 0x120 */ 69 uint32_t hw_feature2; /* 0x124 */ 70 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 71 uint32_t mdio_address; /* 0x200 */ 72 uint32_t mdio_data; /* 0x204 */ 73 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 74 uint32_t address0_high; /* 0x300 */ 75 uint32_t address0_low; /* 0x304 */ 76 }; 77 78 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 79 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 80 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 81 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 82 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 83 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 84 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 85 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 86 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 87 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 88 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 89 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 90 91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 94 95 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 96 97 #define 
EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 99 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 102 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 103 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 104 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 105 106 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 107 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 108 109 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 110 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 111 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 112 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 113 114 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 115 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 116 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 117 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 118 119 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 120 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 121 122 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 123 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 124 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 125 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 126 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 127 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 128 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 129 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 130 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 131 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 132 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 133 134 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 135 136 #define EQOS_MTL_REGS_BASE 0xd00 137 struct eqos_mtl_regs { 138 uint32_t txq0_operation_mode; /* 0xd00 */ 139 uint32_t unused_d04; /* 0xd04 */ 140 uint32_t txq0_debug; /* 0xd08 */ 141 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 142 uint32_t txq0_quantum_weight; /* 0xd18 */ 143 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 144 uint32_t rxq0_operation_mode; /* 0xd30 */ 145 uint32_t unused_d34; /* 0xd34 */ 146 uint32_t rxq0_debug; /* 0xd38 */ 147 }; 148 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 152 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 153 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 154 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 155 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 156 157 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 158 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 159 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 160 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 167 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 168 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 169 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 170 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 171 172 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 173 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 174 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 175 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 176 177 #define EQOS_DMA_REGS_BASE 0x1000 178 struct eqos_dma_regs { 179 uint32_t mode; /* 0x1000 */ 180 uint32_t sysbus_mode; /* 0x1004 */ 181 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 182 uint32_t ch0_control; /* 0x1100 */ 183 uint32_t ch0_tx_control; /* 0x1104 */ 184 uint32_t ch0_rx_control; /* 0x1108 
*/ 185 uint32_t unused_110c; /* 0x110c */ 186 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 187 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 188 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 189 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 190 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 191 uint32_t unused_1124; /* 0x1124 */ 192 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 193 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 194 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 195 }; 196 197 #define EQOS_DMA_MODE_SWR BIT(0) 198 199 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 200 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 201 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 202 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 203 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 204 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 205 206 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 207 208 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 209 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 210 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 211 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 212 213 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 214 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 215 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 216 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 217 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 218 219 /* These registers are Tegra186-specific */ 220 #define EQOS_TEGRA186_REGS_BASE 0x8800 221 struct eqos_tegra186_regs { 222 uint32_t sdmemcomppadctrl; /* 0x8800 */ 223 uint32_t auto_cal_config; /* 0x8804 */ 224 uint32_t unused_8808; /* 0x8808 */ 225 uint32_t auto_cal_status; /* 0x880c */ 226 }; 227 228 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 229 230 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 231 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 232 233 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 234 235 /* Descriptors */ 236 237 #define EQOS_DESCRIPTOR_WORDS 4 238 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 239 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 240 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 241 #define EQOS_DESCRIPTORS_TX 4 242 #define EQOS_DESCRIPTORS_RX 4 243 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 244 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 245 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 246 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 247 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 248 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 249 250 /* 251 * Warn if the cache-line size is larger than the descriptor size. In such 252 * cases the driver will likely fail because the CPU needs to flush the cache 253 * when requeuing RX buffers, therefore descriptors written by the hardware 254 * may be discarded. Architectures with full IO coherence, such as x86, do not 255 * experience this issue, and hence are excluded from this condition. 256 * 257 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 258 * the driver to allocate descriptors from a pool of non-cached memory. 
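 * For example, with the 16-byte descriptors used here and a typical 64-byte
 * ARM cache line (i.e. ARCH_DMA_MINALIGN == 64; illustrative value), four
 * descriptors share each cache line and the warning below fires unless
 * CONFIG_SYS_NONCACHED_MEMORY (or a D-cache-less configuration) is selected.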
259 */ 260 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 261 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 262 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 263 #warning Cache line size is larger than descriptor size 264 #endif 265 #endif 266 267 struct eqos_desc { 268 u32 des0; 269 u32 des1; 270 u32 des2; 271 u32 des3; 272 }; 273 274 #define EQOS_DESC3_OWN BIT(31) 275 #define EQOS_DESC3_FD BIT(29) 276 #define EQOS_DESC3_LD BIT(28) 277 #define EQOS_DESC3_BUF1V BIT(24) 278 279 struct eqos_config { 280 bool reg_access_always_ok; 281 int mdio_wait; 282 int swr_wait; 283 int config_mac; 284 int config_mac_mdio; 285 struct eqos_ops *ops; 286 }; 287 288 struct eqos_ops { 289 void (*eqos_inval_desc)(void *desc); 290 void (*eqos_flush_desc)(void *desc); 291 void (*eqos_inval_buffer)(void *buf, size_t size); 292 void (*eqos_flush_buffer)(void *buf, size_t size); 293 int (*eqos_probe_resources)(struct udevice *dev); 294 int (*eqos_remove_resources)(struct udevice *dev); 295 int (*eqos_stop_resets)(struct udevice *dev); 296 int (*eqos_start_resets)(struct udevice *dev); 297 void (*eqos_stop_clks)(struct udevice *dev); 298 int (*eqos_start_clks)(struct udevice *dev); 299 int (*eqos_calibrate_pads)(struct udevice *dev); 300 int (*eqos_disable_calibration)(struct udevice *dev); 301 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 302 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 303 phy_interface_t (*eqos_get_interface)(struct udevice *dev); 304 }; 305 306 struct eqos_priv { 307 struct udevice *dev; 308 const struct eqos_config *config; 309 fdt_addr_t regs; 310 struct eqos_mac_regs *mac_regs; 311 struct eqos_mtl_regs *mtl_regs; 312 struct eqos_dma_regs *dma_regs; 313 struct eqos_tegra186_regs *tegra186_regs; 314 struct reset_ctl reset_ctl; 315 struct gpio_desc phy_reset_gpio; 316 u32 reset_delays[3]; 317 struct clk clk_master_bus; 318 struct clk clk_rx; 319 struct clk clk_ptp_ref; 320 struct clk clk_tx; 321 struct clk clk_ck; 322 struct clk clk_slave_bus; 323 struct mii_dev *mii; 324 struct phy_device *phy; 325 int phyaddr; 326 u32 max_speed; 327 void *descs; 328 struct eqos_desc *tx_descs; 329 struct eqos_desc *rx_descs; 330 int tx_desc_idx, rx_desc_idx; 331 void *tx_dma_buf; 332 void *rx_dma_buf; 333 void *rx_pkt; 334 bool started; 335 bool reg_access_ok; 336 }; 337 338 /* 339 * TX and RX descriptors are 16 bytes. This causes problems with the cache 340 * maintenance on CPUs where the cache-line size exceeds the size of these 341 * descriptors. What will happen is that when the driver receives a packet 342 * it will be immediately requeued for the hardware to reuse. The CPU will 343 * therefore need to flush the cache-line containing the descriptor, which 344 * will cause all other descriptors in the same cache-line to be flushed 345 * along with it. If one of those descriptors had been written to by the 346 * device those changes (and the associated packet) will be lost. 347 * 348 * To work around this, we make use of non-cached memory if available. If 349 * descriptors are mapped uncached there's no need to manually flush them 350 * or invalidate them. 351 * 352 * Note that this only applies to descriptors. The packet data buffers do 353 * not have the same constraints since they are 1536 bytes large, so they 354 * are unlikely to share cache-lines. 
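 * (For the buffers themselves: EQOS_MAX_PACKET_SIZE is rounded up to
 * ARCH_DMA_MINALIGN and both DMA buffers are allocated with EQOS_BUFFER_ALIGN,
 * i.e. cache-line, alignment, so cache maintenance on one buffer cannot spill
 * into a neighbouring one.)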
355 */ 356 static void *eqos_alloc_descs(unsigned int num) 357 { 358 #ifdef CONFIG_SYS_NONCACHED_MEMORY 359 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 360 EQOS_DESCRIPTOR_ALIGN); 361 #else 362 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 363 #endif 364 } 365 366 static void eqos_free_descs(void *descs) 367 { 368 #ifdef CONFIG_SYS_NONCACHED_MEMORY 369 /* FIXME: noncached_alloc() has no opposite */ 370 #else 371 free(descs); 372 #endif 373 } 374 375 static void eqos_inval_desc_tegra186(void *desc) 376 { 377 #ifndef CONFIG_SYS_NONCACHED_MEMORY 378 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 379 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 380 ARCH_DMA_MINALIGN); 381 382 invalidate_dcache_range(start, end); 383 #endif 384 } 385 386 static void eqos_inval_desc_generic(void *desc) 387 { 388 #ifndef CONFIG_SYS_NONCACHED_MEMORY 389 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 390 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 391 ARCH_DMA_MINALIGN); 392 393 invalidate_dcache_range(start, end); 394 #endif 395 } 396 397 static void eqos_flush_desc_tegra186(void *desc) 398 { 399 #ifndef CONFIG_SYS_NONCACHED_MEMORY 400 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 401 #endif 402 } 403 404 static void eqos_flush_desc_generic(void *desc) 405 { 406 #ifndef CONFIG_SYS_NONCACHED_MEMORY 407 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 408 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 409 ARCH_DMA_MINALIGN); 410 411 flush_dcache_range(start, end); 412 #endif 413 } 414 415 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 416 { 417 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 418 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 419 420 invalidate_dcache_range(start, end); 421 } 422 423 static void eqos_inval_buffer_generic(void *buf, size_t size) 424 { 425 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 426 unsigned long end = roundup((unsigned long)buf + size, 427 ARCH_DMA_MINALIGN); 428 429 invalidate_dcache_range(start, end); 430 } 431 432 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 433 { 434 flush_cache((unsigned long)buf, size); 435 } 436 437 static void eqos_flush_buffer_generic(void *buf, size_t size) 438 { 439 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 440 unsigned long end = roundup((unsigned long)buf + size, 441 ARCH_DMA_MINALIGN); 442 443 flush_dcache_range(start, end); 444 } 445 446 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 447 { 448 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 449 EQOS_MAC_MDIO_ADDRESS_GB, false, 450 1000000, true); 451 } 452 453 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 454 int mdio_reg) 455 { 456 struct eqos_priv *eqos = bus->priv; 457 u32 val; 458 int ret; 459 460 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 461 mdio_reg); 462 463 ret = eqos_mdio_wait_idle(eqos); 464 if (ret) { 465 pr_err("MDIO not idle at entry"); 466 return ret; 467 } 468 469 val = readl(&eqos->mac_regs->mdio_address); 470 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 471 EQOS_MAC_MDIO_ADDRESS_C45E; 472 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 473 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 474 (eqos->config->config_mac_mdio << 475 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 476 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 477 
EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 478 EQOS_MAC_MDIO_ADDRESS_GB; 479 writel(val, &eqos->mac_regs->mdio_address); 480 481 udelay(eqos->config->mdio_wait); 482 483 ret = eqos_mdio_wait_idle(eqos); 484 if (ret) { 485 pr_err("MDIO read didn't complete"); 486 return ret; 487 } 488 489 val = readl(&eqos->mac_regs->mdio_data); 490 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 491 492 debug("%s: val=%x\n", __func__, val); 493 494 return val; 495 } 496 497 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 498 int mdio_reg, u16 mdio_val) 499 { 500 struct eqos_priv *eqos = bus->priv; 501 u32 val; 502 int ret; 503 504 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 505 mdio_addr, mdio_reg, mdio_val); 506 507 ret = eqos_mdio_wait_idle(eqos); 508 if (ret) { 509 pr_err("MDIO not idle at entry"); 510 return ret; 511 } 512 513 writel(mdio_val, &eqos->mac_regs->mdio_data); 514 515 val = readl(&eqos->mac_regs->mdio_address); 516 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 517 EQOS_MAC_MDIO_ADDRESS_C45E; 518 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 519 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 520 (eqos->config->config_mac_mdio << 521 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 522 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 523 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 524 EQOS_MAC_MDIO_ADDRESS_GB; 525 writel(val, &eqos->mac_regs->mdio_address); 526 527 udelay(eqos->config->mdio_wait); 528 529 ret = eqos_mdio_wait_idle(eqos); 530 if (ret) { 531 pr_err("MDIO read didn't complete"); 532 return ret; 533 } 534 535 return 0; 536 } 537 538 static int eqos_start_clks_tegra186(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_slave_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 549 goto err; 550 } 551 552 ret = clk_enable(&eqos->clk_master_bus); 553 if (ret < 0) { 554 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 555 goto err_disable_clk_slave_bus; 556 } 557 558 ret = clk_enable(&eqos->clk_rx); 559 if (ret < 0) { 560 pr_err("clk_enable(clk_rx) failed: %d", ret); 561 goto err_disable_clk_master_bus; 562 } 563 564 ret = clk_enable(&eqos->clk_ptp_ref); 565 if (ret < 0) { 566 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 567 goto err_disable_clk_rx; 568 } 569 570 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 571 if (ret < 0) { 572 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 573 goto err_disable_clk_ptp_ref; 574 } 575 576 ret = clk_enable(&eqos->clk_tx); 577 if (ret < 0) { 578 pr_err("clk_enable(clk_tx) failed: %d", ret); 579 goto err_disable_clk_ptp_ref; 580 } 581 #endif 582 583 debug("%s: OK\n", __func__); 584 return 0; 585 586 #ifdef CONFIG_CLK 587 err_disable_clk_ptp_ref: 588 clk_disable(&eqos->clk_ptp_ref); 589 err_disable_clk_rx: 590 clk_disable(&eqos->clk_rx); 591 err_disable_clk_master_bus: 592 clk_disable(&eqos->clk_master_bus); 593 err_disable_clk_slave_bus: 594 clk_disable(&eqos->clk_slave_bus); 595 err: 596 debug("%s: FAILED: %d\n", __func__, ret); 597 return ret; 598 #endif 599 } 600 601 static int eqos_start_clks_stm32(struct udevice *dev) 602 { 603 #ifdef CONFIG_CLK 604 struct eqos_priv *eqos = dev_get_priv(dev); 605 int ret; 606 607 debug("%s(dev=%p):\n", __func__, dev); 608 609 ret = clk_enable(&eqos->clk_master_bus); 610 if (ret < 0) { 611 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 612 goto err; 613 } 614 615 ret = clk_enable(&eqos->clk_rx); 616 if (ret < 0) 
{ 617 pr_err("clk_enable(clk_rx) failed: %d", ret); 618 goto err_disable_clk_master_bus; 619 } 620 621 ret = clk_enable(&eqos->clk_tx); 622 if (ret < 0) { 623 pr_err("clk_enable(clk_tx) failed: %d", ret); 624 goto err_disable_clk_rx; 625 } 626 627 if (clk_valid(&eqos->clk_ck)) { 628 ret = clk_enable(&eqos->clk_ck); 629 if (ret < 0) { 630 pr_err("clk_enable(clk_ck) failed: %d", ret); 631 goto err_disable_clk_tx; 632 } 633 } 634 #endif 635 636 debug("%s: OK\n", __func__); 637 return 0; 638 639 #ifdef CONFIG_CLK 640 err_disable_clk_tx: 641 clk_disable(&eqos->clk_tx); 642 err_disable_clk_rx: 643 clk_disable(&eqos->clk_rx); 644 err_disable_clk_master_bus: 645 clk_disable(&eqos->clk_master_bus); 646 err: 647 debug("%s: FAILED: %d\n", __func__, ret); 648 return ret; 649 #endif 650 } 651 652 static int eqos_start_clks_imx(struct udevice *dev) 653 { 654 return 0; 655 } 656 657 static void eqos_stop_clks_tegra186(struct udevice *dev) 658 { 659 #ifdef CONFIG_CLK 660 struct eqos_priv *eqos = dev_get_priv(dev); 661 662 debug("%s(dev=%p):\n", __func__, dev); 663 664 clk_disable(&eqos->clk_tx); 665 clk_disable(&eqos->clk_ptp_ref); 666 clk_disable(&eqos->clk_rx); 667 clk_disable(&eqos->clk_master_bus); 668 clk_disable(&eqos->clk_slave_bus); 669 #endif 670 671 debug("%s: OK\n", __func__); 672 } 673 674 static void eqos_stop_clks_stm32(struct udevice *dev) 675 { 676 #ifdef CONFIG_CLK 677 struct eqos_priv *eqos = dev_get_priv(dev); 678 679 debug("%s(dev=%p):\n", __func__, dev); 680 681 clk_disable(&eqos->clk_tx); 682 clk_disable(&eqos->clk_rx); 683 clk_disable(&eqos->clk_master_bus); 684 if (clk_valid(&eqos->clk_ck)) 685 clk_disable(&eqos->clk_ck); 686 #endif 687 688 debug("%s: OK\n", __func__); 689 } 690 691 static void eqos_stop_clks_imx(struct udevice *dev) 692 { 693 /* empty */ 694 } 695 696 static int eqos_start_resets_tegra186(struct udevice *dev) 697 { 698 struct eqos_priv *eqos = dev_get_priv(dev); 699 int ret; 700 701 debug("%s(dev=%p):\n", __func__, dev); 702 703 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 704 if (ret < 0) { 705 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 706 return ret; 707 } 708 709 udelay(2); 710 711 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 712 if (ret < 0) { 713 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 714 return ret; 715 } 716 717 ret = reset_assert(&eqos->reset_ctl); 718 if (ret < 0) { 719 pr_err("reset_assert() failed: %d", ret); 720 return ret; 721 } 722 723 udelay(2); 724 725 ret = reset_deassert(&eqos->reset_ctl); 726 if (ret < 0) { 727 pr_err("reset_deassert() failed: %d", ret); 728 return ret; 729 } 730 731 debug("%s: OK\n", __func__); 732 return 0; 733 } 734 735 static int eqos_start_resets_stm32(struct udevice *dev) 736 { 737 struct eqos_priv *eqos = dev_get_priv(dev); 738 int ret; 739 740 debug("%s(dev=%p):\n", __func__, dev); 741 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 742 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 743 if (ret < 0) { 744 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 745 ret); 746 return ret; 747 } 748 749 udelay(eqos->reset_delays[0]); 750 751 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 752 if (ret < 0) { 753 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 754 ret); 755 return ret; 756 } 757 758 udelay(eqos->reset_delays[1]); 759 760 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 761 if (ret < 0) { 762 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 763 ret); 764 return ret; 765 } 766 767 udelay(eqos->reset_delays[2]); 768 } 
769 debug("%s: OK\n", __func__); 770 771 return 0; 772 } 773 774 static int eqos_start_resets_imx(struct udevice *dev) 775 { 776 return 0; 777 } 778 779 static int eqos_stop_resets_tegra186(struct udevice *dev) 780 { 781 struct eqos_priv *eqos = dev_get_priv(dev); 782 783 reset_assert(&eqos->reset_ctl); 784 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 785 786 return 0; 787 } 788 789 static int eqos_stop_resets_stm32(struct udevice *dev) 790 { 791 struct eqos_priv *eqos = dev_get_priv(dev); 792 int ret; 793 794 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 795 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 796 if (ret < 0) { 797 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 798 ret); 799 return ret; 800 } 801 } 802 803 return 0; 804 } 805 806 static int eqos_stop_resets_imx(struct udevice *dev) 807 { 808 return 0; 809 } 810 811 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 812 { 813 struct eqos_priv *eqos = dev_get_priv(dev); 814 int ret; 815 816 debug("%s(dev=%p):\n", __func__, dev); 817 818 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 819 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 820 821 udelay(1); 822 823 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 824 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 825 826 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 827 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 828 if (ret) { 829 pr_err("calibrate didn't start"); 830 goto failed; 831 } 832 833 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 834 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 835 if (ret) { 836 pr_err("calibrate didn't finish"); 837 goto failed; 838 } 839 840 ret = 0; 841 842 failed: 843 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 844 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 845 846 debug("%s: returns %d\n", __func__, ret); 847 848 return ret; 849 } 850 851 static int eqos_disable_calibration_tegra186(struct udevice *dev) 852 { 853 struct eqos_priv *eqos = dev_get_priv(dev); 854 855 debug("%s(dev=%p):\n", __func__, dev); 856 857 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 858 EQOS_AUTO_CAL_CONFIG_ENABLE); 859 860 return 0; 861 } 862 863 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 864 { 865 #ifdef CONFIG_CLK 866 struct eqos_priv *eqos = dev_get_priv(dev); 867 868 return clk_get_rate(&eqos->clk_slave_bus); 869 #else 870 return 0; 871 #endif 872 } 873 874 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 875 { 876 #ifdef CONFIG_CLK 877 struct eqos_priv *eqos = dev_get_priv(dev); 878 879 return clk_get_rate(&eqos->clk_master_bus); 880 #else 881 return 0; 882 #endif 883 } 884 885 __weak u32 imx_get_eqos_csr_clk(void) 886 { 887 return 100 * 1000000; 888 } 889 __weak int imx_eqos_txclk_set_rate(unsigned long rate) 890 { 891 return 0; 892 } 893 894 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 895 { 896 return imx_get_eqos_csr_clk(); 897 } 898 899 static int eqos_calibrate_pads_stm32(struct udevice *dev) 900 { 901 return 0; 902 } 903 904 static int eqos_calibrate_pads_imx(struct udevice *dev) 905 { 906 return 0; 907 } 908 909 static int eqos_disable_calibration_stm32(struct udevice *dev) 910 { 911 return 0; 912 } 913 914 static int eqos_disable_calibration_imx(struct udevice *dev) 915 { 916 return 0; 917 } 918 919 static int eqos_set_full_duplex(struct udevice *dev) 920 { 921 struct eqos_priv *eqos = dev_get_priv(dev); 922 923 debug("%s(dev=%p):\n", __func__, dev); 924 925 setbits_le32(&eqos->mac_regs->configuration, 
EQOS_MAC_CONFIGURATION_DM); 926 927 return 0; 928 } 929 930 static int eqos_set_half_duplex(struct udevice *dev) 931 { 932 struct eqos_priv *eqos = dev_get_priv(dev); 933 934 debug("%s(dev=%p):\n", __func__, dev); 935 936 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 937 938 /* WAR: Flush TX queue when switching to half-duplex */ 939 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 940 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 941 942 return 0; 943 } 944 945 static int eqos_set_gmii_speed(struct udevice *dev) 946 { 947 struct eqos_priv *eqos = dev_get_priv(dev); 948 949 debug("%s(dev=%p):\n", __func__, dev); 950 951 clrbits_le32(&eqos->mac_regs->configuration, 952 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 953 954 return 0; 955 } 956 957 static int eqos_set_mii_speed_100(struct udevice *dev) 958 { 959 struct eqos_priv *eqos = dev_get_priv(dev); 960 961 debug("%s(dev=%p):\n", __func__, dev); 962 963 setbits_le32(&eqos->mac_regs->configuration, 964 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 965 966 return 0; 967 } 968 969 static int eqos_set_mii_speed_10(struct udevice *dev) 970 { 971 struct eqos_priv *eqos = dev_get_priv(dev); 972 973 debug("%s(dev=%p):\n", __func__, dev); 974 975 clrsetbits_le32(&eqos->mac_regs->configuration, 976 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 977 978 return 0; 979 } 980 981 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 982 { 983 #ifdef CONFIG_CLK 984 struct eqos_priv *eqos = dev_get_priv(dev); 985 ulong rate; 986 int ret; 987 988 debug("%s(dev=%p):\n", __func__, dev); 989 990 switch (eqos->phy->speed) { 991 case SPEED_1000: 992 rate = 125 * 1000 * 1000; 993 break; 994 case SPEED_100: 995 rate = 25 * 1000 * 1000; 996 break; 997 case SPEED_10: 998 rate = 2.5 * 1000 * 1000; 999 break; 1000 default: 1001 pr_err("invalid speed %d", eqos->phy->speed); 1002 return -EINVAL; 1003 } 1004 1005 ret = clk_set_rate(&eqos->clk_tx, rate); 1006 if (ret < 0) { 1007 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 1008 return ret; 1009 } 1010 #endif 1011 1012 return 0; 1013 } 1014 1015 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 1016 { 1017 return 0; 1018 } 1019 1020 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 1021 { 1022 struct eqos_priv *eqos = dev_get_priv(dev); 1023 ulong rate; 1024 int ret; 1025 1026 debug("%s(dev=%p):\n", __func__, dev); 1027 1028 switch (eqos->phy->speed) { 1029 case SPEED_1000: 1030 rate = 125 * 1000 * 1000; 1031 break; 1032 case SPEED_100: 1033 rate = 25 * 1000 * 1000; 1034 break; 1035 case SPEED_10: 1036 rate = 2.5 * 1000 * 1000; 1037 break; 1038 default: 1039 pr_err("invalid speed %d", eqos->phy->speed); 1040 return -EINVAL; 1041 } 1042 1043 ret = imx_eqos_txclk_set_rate(rate); 1044 if (ret < 0) { 1045 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 1046 return ret; 1047 } 1048 1049 return 0; 1050 } 1051 1052 static int eqos_adjust_link(struct udevice *dev) 1053 { 1054 struct eqos_priv *eqos = dev_get_priv(dev); 1055 int ret; 1056 bool en_calibration; 1057 1058 debug("%s(dev=%p):\n", __func__, dev); 1059 1060 if (eqos->phy->duplex) 1061 ret = eqos_set_full_duplex(dev); 1062 else 1063 ret = eqos_set_half_duplex(dev); 1064 if (ret < 0) { 1065 pr_err("eqos_set_*_duplex() failed: %d", ret); 1066 return ret; 1067 } 1068 1069 switch (eqos->phy->speed) { 1070 case SPEED_1000: 1071 en_calibration = true; 1072 ret = eqos_set_gmii_speed(dev); 1073 break; 1074 case SPEED_100: 1075 en_calibration = true; 1076 ret = 
eqos_set_mii_speed_100(dev); 1077 break; 1078 case SPEED_10: 1079 en_calibration = false; 1080 ret = eqos_set_mii_speed_10(dev); 1081 break; 1082 default: 1083 pr_err("invalid speed %d", eqos->phy->speed); 1084 return -EINVAL; 1085 } 1086 if (ret < 0) { 1087 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1088 return ret; 1089 } 1090 1091 if (en_calibration) { 1092 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1093 if (ret < 0) { 1094 pr_err("eqos_calibrate_pads() failed: %d", 1095 ret); 1096 return ret; 1097 } 1098 } else { 1099 ret = eqos->config->ops->eqos_disable_calibration(dev); 1100 if (ret < 0) { 1101 pr_err("eqos_disable_calibration() failed: %d", 1102 ret); 1103 return ret; 1104 } 1105 } 1106 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1107 if (ret < 0) { 1108 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1109 return ret; 1110 } 1111 1112 return 0; 1113 } 1114 1115 static int eqos_write_hwaddr(struct udevice *dev) 1116 { 1117 struct eth_pdata *plat = dev_get_platdata(dev); 1118 struct eqos_priv *eqos = dev_get_priv(dev); 1119 uint32_t val; 1120 1121 /* 1122 * This function may be called before start() or after stop(). At that 1123 * time, on at least some configurations of the EQoS HW, all clocks to 1124 * the EQoS HW block will be stopped, and a reset signal applied. If 1125 * any register access is attempted in this state, bus timeouts or CPU 1126 * hangs may occur. This check prevents that. 1127 * 1128 * A simple solution to this problem would be to not implement 1129 * write_hwaddr(), since start() always writes the MAC address into HW 1130 * anyway. However, it is desirable to implement write_hwaddr() to 1131 * support the case of SW that runs subsequent to U-Boot which expects 1132 * the MAC address to already be programmed into the EQoS registers, 1133 * which must happen irrespective of whether the U-Boot user (or 1134 * scripts) actually made use of the EQoS device, and hence 1135 * irrespective of whether start() was ever called. 1136 * 1137 * Note that this requirement by subsequent SW is not valid for 1138 * Tegra186, and is likely not valid for any non-PCI instantiation of 1139 * the EQoS HW block. This function is implemented solely as 1140 * future-proofing with the expectation the driver will eventually be 1141 * ported to some system where the expectation above is true. 
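 * Hence the guard below: the address registers are only written when the
 * configuration declares register access always safe (reg_access_always_ok),
 * or once start() has enabled the clocks, released the resets and set
 * reg_access_ok.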
1142 */ 1143 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1144 return 0; 1145 1146 /* Update the MAC address */ 1147 val = (plat->enetaddr[5] << 8) | 1148 (plat->enetaddr[4]); 1149 writel(val, &eqos->mac_regs->address0_high); 1150 val = (plat->enetaddr[3] << 24) | 1151 (plat->enetaddr[2] << 16) | 1152 (plat->enetaddr[1] << 8) | 1153 (plat->enetaddr[0]); 1154 writel(val, &eqos->mac_regs->address0_low); 1155 1156 return 0; 1157 } 1158 1159 static int eqos_read_rom_hwaddr(struct udevice *dev) 1160 { 1161 struct eth_pdata *pdata = dev_get_platdata(dev); 1162 1163 #ifdef CONFIG_ARCH_IMX8M 1164 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1165 #endif 1166 return !is_valid_ethaddr(pdata->enetaddr); 1167 } 1168 1169 static int eqos_start(struct udevice *dev) 1170 { 1171 struct eqos_priv *eqos = dev_get_priv(dev); 1172 int ret, i; 1173 ulong rate; 1174 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1175 ulong last_rx_desc; 1176 1177 debug("%s(dev=%p):\n", __func__, dev); 1178 1179 eqos->tx_desc_idx = 0; 1180 eqos->rx_desc_idx = 0; 1181 1182 ret = eqos->config->ops->eqos_start_clks(dev); 1183 if (ret < 0) { 1184 pr_err("eqos_start_clks() failed: %d", ret); 1185 goto err; 1186 } 1187 1188 ret = eqos->config->ops->eqos_start_resets(dev); 1189 if (ret < 0) { 1190 pr_err("eqos_start_resets() failed: %d", ret); 1191 goto err_stop_clks; 1192 } 1193 1194 udelay(10); 1195 1196 eqos->reg_access_ok = true; 1197 1198 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1199 EQOS_DMA_MODE_SWR, false, 1200 eqos->config->swr_wait, false); 1201 if (ret) { 1202 pr_err("EQOS_DMA_MODE_SWR stuck"); 1203 goto err_stop_resets; 1204 } 1205 1206 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1207 if (ret < 0) { 1208 pr_err("eqos_calibrate_pads() failed: %d", ret); 1209 goto err_stop_resets; 1210 } 1211 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1212 1213 val = (rate / 1000000) - 1; 1214 writel(val, &eqos->mac_regs->us_tic_counter); 1215 1216 /* 1217 * if PHY was already connected and configured, 1218 * don't need to reconnect/reconfigure again 1219 */ 1220 if (!eqos->phy) { 1221 int addr = -1; 1222 #ifdef CONFIG_DM_ETH_PHY 1223 addr = eth_phy_get_addr(dev); 1224 #endif 1225 #ifdef DWC_NET_PHYADDR 1226 addr = DWC_NET_PHYADDR; 1227 #endif 1228 eqos->phy = phy_connect(eqos->mii, addr, dev, 1229 eqos->config->ops->eqos_get_interface(dev)); 1230 if (!eqos->phy) { 1231 pr_err("phy_connect() failed"); 1232 goto err_stop_resets; 1233 } 1234 1235 if (eqos->max_speed) { 1236 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1237 if (ret) { 1238 pr_err("phy_set_supported() failed: %d", ret); 1239 goto err_shutdown_phy; 1240 } 1241 } 1242 1243 ret = phy_config(eqos->phy); 1244 if (ret < 0) { 1245 pr_err("phy_config() failed: %d", ret); 1246 goto err_shutdown_phy; 1247 } 1248 } 1249 1250 ret = phy_startup(eqos->phy); 1251 if (ret < 0) { 1252 pr_err("phy_startup() failed: %d", ret); 1253 goto err_shutdown_phy; 1254 } 1255 1256 if (!eqos->phy->link) { 1257 pr_err("No link"); 1258 goto err_shutdown_phy; 1259 } 1260 1261 ret = eqos_adjust_link(dev); 1262 if (ret < 0) { 1263 pr_err("eqos_adjust_link() failed: %d", ret); 1264 goto err_shutdown_phy; 1265 } 1266 1267 /* Configure MTL */ 1268 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1269 1270 /* Enable Store and Forward mode for TX */ 1271 /* Program Tx operating mode */ 1272 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1273 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1274 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1275 
EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1276 1277 /* Transmit Queue weight */ 1278 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1279 1280 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1281 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1282 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1283 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1284 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1285 1286 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1287 val = readl(&eqos->mac_regs->hw_feature1); 1288 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1289 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1290 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1291 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1292 1293 /* 1294 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1295 * r/tqs is encoded as (n / 256) - 1. 1296 */ 1297 tqs = (128 << tx_fifo_sz) / 256 - 1; 1298 rqs = (128 << rx_fifo_sz) / 256 - 1; 1299 1300 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1301 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1302 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1303 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1304 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1305 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1306 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1307 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1308 1309 /* Flow control used only if each channel gets 4KB or more FIFO */ 1310 if (rqs >= ((4096 / 256) - 1)) { 1311 u32 rfd, rfa; 1312 1313 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1314 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1315 1316 /* 1317 * Set Threshold for Activating Flow Control space for min 2 1318 * frames, i.e. (1500 * 1) = 1500 bytes. 1319 * 1320 * Set Threshold for Deactivating Flow Control for space of 1321 * min 1 frame (frame size 1500 bytes) in receive fifo 1322 */ 1323 if (rqs == ((4096 / 256) - 1)) { 1324 /* 1325 * This violates the above formula because of FIFO size 1326 * limit, therefore overflow may occur in spite of this.
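 * (Illustrative arithmetic: rqs == (4096 / 256) - 1, i.e. 15, is the
 * encoding for a 4 KB RX FIFO.)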
1327 */ 1328 rfd = 0x3; /* Full-3K */ 1329 rfa = 0x1; /* Full-1.5K */ 1330 } else if (rqs == ((8192 / 256) - 1)) { 1331 rfd = 0x6; /* Full-4K */ 1332 rfa = 0xa; /* Full-6K */ 1333 } else if (rqs == ((16384 / 256) - 1)) { 1334 rfd = 0x6; /* Full-4K */ 1335 rfa = 0x12; /* Full-10K */ 1336 } else { 1337 rfd = 0x6; /* Full-4K */ 1338 rfa = 0x1E; /* Full-16K */ 1339 } 1340 1341 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1342 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1343 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1344 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1345 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1346 (rfd << 1347 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1348 (rfa << 1349 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1350 } 1351 1352 /* Configure MAC */ 1353 1354 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1355 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1356 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1357 eqos->config->config_mac << 1358 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1359 1360 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1361 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1362 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1363 0x2 << 1364 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1365 1366 /* Multicast and Broadcast Queue Enable */ 1367 setbits_le32(&eqos->mac_regs->unused_0a4, 1368 0x00100000); 1369 /* enable promise mode */ 1370 setbits_le32(&eqos->mac_regs->unused_004[1], 1371 0x1); 1372 1373 /* Set TX flow control parameters */ 1374 /* Set Pause Time */ 1375 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1376 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1377 /* Assign priority for TX flow control */ 1378 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1379 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1380 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1381 /* Assign priority for RX flow control */ 1382 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1383 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1384 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1385 /* Enable flow control */ 1386 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1387 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1388 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1389 EQOS_MAC_RX_FLOW_CTRL_RFE); 1390 1391 clrsetbits_le32(&eqos->mac_regs->configuration, 1392 EQOS_MAC_CONFIGURATION_GPSLCE | 1393 EQOS_MAC_CONFIGURATION_WD | 1394 EQOS_MAC_CONFIGURATION_JD | 1395 EQOS_MAC_CONFIGURATION_JE, 1396 EQOS_MAC_CONFIGURATION_CST | 1397 EQOS_MAC_CONFIGURATION_ACS); 1398 1399 eqos_write_hwaddr(dev); 1400 1401 /* Configure DMA */ 1402 1403 /* Enable OSP mode */ 1404 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1405 EQOS_DMA_CH0_TX_CONTROL_OSP); 1406 1407 /* RX buffer size. Must be a multiple of bus width */ 1408 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1409 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1410 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1411 EQOS_MAX_PACKET_SIZE << 1412 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1413 1414 setbits_le32(&eqos->dma_regs->ch0_control, 1415 EQOS_DMA_CH0_CONTROL_PBLX8); 1416 1417 /* 1418 * Burst length must be < 1/2 FIFO size. 1419 * FIFO size in tqs is encoded as (n / 256) - 1. 1420 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1421 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 
1422 */ 1423 pbl = tqs + 1; 1424 if (pbl > 32) 1425 pbl = 32; 1426 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1427 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1428 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1429 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1430 1431 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1432 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1433 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1434 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1435 1436 /* DMA performance configuration */ 1437 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1438 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1439 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1440 writel(val, &eqos->dma_regs->sysbus_mode); 1441 1442 /* Set up descriptors */ 1443 1444 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1445 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1446 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1447 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1448 (i * EQOS_MAX_PACKET_SIZE)); 1449 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1450 mb(); 1451 eqos->config->ops->eqos_flush_desc(rx_desc); 1452 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1453 (i * EQOS_MAX_PACKET_SIZE), 1454 EQOS_MAX_PACKET_SIZE); 1455 } 1456 1457 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1458 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1459 writel(EQOS_DESCRIPTORS_TX - 1, 1460 &eqos->dma_regs->ch0_txdesc_ring_length); 1461 1462 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1463 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1464 writel(EQOS_DESCRIPTORS_RX - 1, 1465 &eqos->dma_regs->ch0_rxdesc_ring_length); 1466 1467 /* Enable everything */ 1468 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1469 EQOS_DMA_CH0_TX_CONTROL_ST); 1470 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1471 EQOS_DMA_CH0_RX_CONTROL_SR); 1472 setbits_le32(&eqos->mac_regs->configuration, 1473 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1474 1475 /* TX tail pointer not written until we need to TX a packet */ 1476 /* 1477 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1478 * first descriptor, implying all descriptors were available. However, 1479 * that's not distinguishable from none of the descriptors being 1480 * available. 
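 * The tail pointer is then advanced one slot at a time from eqos_free_pkt()
 * as each received buffer is handed back to the DMA.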
1481 */ 1482 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1483 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1484 1485 eqos->started = true; 1486 1487 debug("%s: OK\n", __func__); 1488 return 0; 1489 1490 err_shutdown_phy: 1491 phy_shutdown(eqos->phy); 1492 err_stop_resets: 1493 eqos->config->ops->eqos_stop_resets(dev); 1494 err_stop_clks: 1495 eqos->config->ops->eqos_stop_clks(dev); 1496 err: 1497 pr_err("FAILED: %d", ret); 1498 return ret; 1499 } 1500 1501 static void eqos_stop(struct udevice *dev) 1502 { 1503 struct eqos_priv *eqos = dev_get_priv(dev); 1504 int i; 1505 1506 debug("%s(dev=%p):\n", __func__, dev); 1507 1508 if (!eqos->started) 1509 return; 1510 eqos->started = false; 1511 eqos->reg_access_ok = false; 1512 1513 /* Disable TX DMA */ 1514 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1515 EQOS_DMA_CH0_TX_CONTROL_ST); 1516 1517 /* Wait for TX all packets to drain out of MTL */ 1518 for (i = 0; i < 1000000; i++) { 1519 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1520 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1521 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1522 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1523 if ((trcsts != 1) && (!txqsts)) 1524 break; 1525 } 1526 1527 /* Turn off MAC TX and RX */ 1528 clrbits_le32(&eqos->mac_regs->configuration, 1529 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1530 1531 /* Wait for all RX packets to drain out of MTL */ 1532 for (i = 0; i < 1000000; i++) { 1533 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1534 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1535 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1536 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1537 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1538 if ((!prxq) && (!rxqsts)) 1539 break; 1540 } 1541 1542 /* Turn off RX DMA */ 1543 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1544 EQOS_DMA_CH0_RX_CONTROL_SR); 1545 1546 if (eqos->phy) { 1547 phy_shutdown(eqos->phy); 1548 } 1549 eqos->config->ops->eqos_stop_resets(dev); 1550 eqos->config->ops->eqos_stop_clks(dev); 1551 1552 debug("%s: OK\n", __func__); 1553 } 1554 1555 static int eqos_send(struct udevice *dev, void *packet, int length) 1556 { 1557 struct eqos_priv *eqos = dev_get_priv(dev); 1558 struct eqos_desc *tx_desc; 1559 int i; 1560 1561 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1562 length); 1563 1564 memcpy(eqos->tx_dma_buf, packet, length); 1565 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1566 1567 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1568 eqos->tx_desc_idx++; 1569 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1570 1571 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1572 tx_desc->des1 = 0; 1573 tx_desc->des2 = length; 1574 /* 1575 * Make sure that if HW sees the _OWN write below, it will see all the 1576 * writes to the rest of the descriptor too. 
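 * The mb() below provides that ordering; the descriptor flush that follows
 * pushes the whole descriptor out to memory before the tail-pointer write
 * tells the DMA to fetch it.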
1577 */ 1578 mb(); 1579 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1580 eqos->config->ops->eqos_flush_desc(tx_desc); 1581 1582 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1583 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1584 1585 for (i = 0; i < 1000000; i++) { 1586 eqos->config->ops->eqos_inval_desc(tx_desc); 1587 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1588 return 0; 1589 udelay(1); 1590 } 1591 1592 debug("%s: TX timeout\n", __func__); 1593 1594 return -ETIMEDOUT; 1595 } 1596 1597 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1598 { 1599 struct eqos_priv *eqos = dev_get_priv(dev); 1600 struct eqos_desc *rx_desc; 1601 int length; 1602 1603 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1604 1605 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1606 eqos->config->ops->eqos_inval_desc(rx_desc); 1607 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1608 debug("%s: RX packet not available\n", __func__); 1609 return -EAGAIN; 1610 } 1611 1612 *packetp = eqos->rx_dma_buf + 1613 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1614 length = rx_desc->des3 & 0x7fff; 1615 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1616 1617 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1618 1619 return length; 1620 } 1621 1622 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1623 { 1624 struct eqos_priv *eqos = dev_get_priv(dev); 1625 uchar *packet_expected; 1626 struct eqos_desc *rx_desc; 1627 1628 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1629 1630 packet_expected = eqos->rx_dma_buf + 1631 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1632 if (packet != packet_expected) { 1633 debug("%s: Unexpected packet (expected %p)\n", __func__, 1634 packet_expected); 1635 return -EINVAL; 1636 } 1637 1638 eqos->config->ops->eqos_inval_buffer(packet, length); 1639 1640 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1641 1642 rx_desc->des0 = 0; 1643 mb(); 1644 eqos->config->ops->eqos_flush_desc(rx_desc); 1645 eqos->config->ops->eqos_inval_buffer(packet, length); 1646 rx_desc->des0 = (u32)(ulong)packet; 1647 rx_desc->des1 = 0; 1648 rx_desc->des2 = 0; 1649 /* 1650 * Make sure that if HW sees the _OWN write below, it will see all the 1651 * writes to the rest of the descriptor too. 
1652 */ 1653 mb(); 1654 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1655 eqos->config->ops->eqos_flush_desc(rx_desc); 1656 1657 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1658 1659 eqos->rx_desc_idx++; 1660 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1661 1662 return 0; 1663 } 1664 1665 static int eqos_probe_resources_core(struct udevice *dev) 1666 { 1667 struct eqos_priv *eqos = dev_get_priv(dev); 1668 int ret; 1669 1670 debug("%s(dev=%p):\n", __func__, dev); 1671 1672 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1673 EQOS_DESCRIPTORS_RX); 1674 if (!eqos->descs) { 1675 debug("%s: eqos_alloc_descs() failed\n", __func__); 1676 ret = -ENOMEM; 1677 goto err; 1678 } 1679 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1680 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1681 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1682 eqos->rx_descs); 1683 1684 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1685 if (!eqos->tx_dma_buf) { 1686 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1687 ret = -ENOMEM; 1688 goto err_free_descs; 1689 } 1690 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1691 1692 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1693 if (!eqos->rx_dma_buf) { 1694 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1695 ret = -ENOMEM; 1696 goto err_free_tx_dma_buf; 1697 } 1698 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1699 1700 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1701 if (!eqos->rx_pkt) { 1702 debug("%s: malloc(rx_pkt) failed\n", __func__); 1703 ret = -ENOMEM; 1704 goto err_free_rx_dma_buf; 1705 } 1706 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1707 1708 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1709 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1710 1711 debug("%s: OK\n", __func__); 1712 return 0; 1713 1714 err_free_rx_dma_buf: 1715 free(eqos->rx_dma_buf); 1716 err_free_tx_dma_buf: 1717 free(eqos->tx_dma_buf); 1718 err_free_descs: 1719 eqos_free_descs(eqos->descs); 1720 err: 1721 1722 debug("%s: returns %d\n", __func__, ret); 1723 return ret; 1724 } 1725 1726 static int eqos_remove_resources_core(struct udevice *dev) 1727 { 1728 struct eqos_priv *eqos = dev_get_priv(dev); 1729 1730 debug("%s(dev=%p):\n", __func__, dev); 1731 1732 free(eqos->rx_pkt); 1733 free(eqos->rx_dma_buf); 1734 free(eqos->tx_dma_buf); 1735 eqos_free_descs(eqos->descs); 1736 1737 debug("%s: OK\n", __func__); 1738 return 0; 1739 } 1740 1741 static int eqos_probe_resources_tegra186(struct udevice *dev) 1742 { 1743 struct eqos_priv *eqos = dev_get_priv(dev); 1744 int ret; 1745 1746 debug("%s(dev=%p):\n", __func__, dev); 1747 1748 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1749 if (ret) { 1750 pr_err("reset_get_by_name(rst) failed: %d", ret); 1751 return ret; 1752 } 1753 1754 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1755 &eqos->phy_reset_gpio, 1756 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1757 if (ret) { 1758 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1759 goto err_free_reset_eqos; 1760 } 1761 1762 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1763 if (ret) { 1764 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1765 goto err_free_gpio_phy_reset; 1766 } 1767 1768 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1769 if (ret) { 1770 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1771 goto err_free_clk_slave_bus; 1772 } 1773 1774 ret = clk_get_by_name(dev, "rx", 
&eqos->clk_rx); 1775 if (ret) { 1776 pr_err("clk_get_by_name(rx) failed: %d", ret); 1777 goto err_free_clk_master_bus; 1778 } 1779 1780 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1781 if (ret) { 1782 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1783 goto err_free_clk_rx; 1784 return ret; 1785 } 1786 1787 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1788 if (ret) { 1789 pr_err("clk_get_by_name(tx) failed: %d", ret); 1790 goto err_free_clk_ptp_ref; 1791 } 1792 1793 debug("%s: OK\n", __func__); 1794 return 0; 1795 1796 err_free_clk_ptp_ref: 1797 clk_free(&eqos->clk_ptp_ref); 1798 err_free_clk_rx: 1799 clk_free(&eqos->clk_rx); 1800 err_free_clk_master_bus: 1801 clk_free(&eqos->clk_master_bus); 1802 err_free_clk_slave_bus: 1803 clk_free(&eqos->clk_slave_bus); 1804 err_free_gpio_phy_reset: 1805 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1806 err_free_reset_eqos: 1807 reset_free(&eqos->reset_ctl); 1808 1809 debug("%s: returns %d\n", __func__, ret); 1810 return ret; 1811 } 1812 1813 /* board-specific Ethernet Interface initializations. */ 1814 __weak int board_interface_eth_init(struct udevice *dev, 1815 phy_interface_t interface_type) 1816 { 1817 return 0; 1818 } 1819 1820 static int eqos_probe_resources_stm32(struct udevice *dev) 1821 { 1822 struct eqos_priv *eqos = dev_get_priv(dev); 1823 int ret; 1824 phy_interface_t interface; 1825 struct ofnode_phandle_args phandle_args; 1826 1827 debug("%s(dev=%p):\n", __func__, dev); 1828 1829 interface = eqos->config->ops->eqos_get_interface(dev); 1830 1831 if (interface == PHY_INTERFACE_MODE_NONE) { 1832 pr_err("Invalid PHY interface\n"); 1833 return -EINVAL; 1834 } 1835 1836 ret = board_interface_eth_init(dev, interface); 1837 if (ret) 1838 return -EINVAL; 1839 1840 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1841 1842 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1843 if (ret) { 1844 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1845 goto err_probe; 1846 } 1847 1848 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1849 if (ret) { 1850 pr_err("clk_get_by_name(rx) failed: %d", ret); 1851 goto err_free_clk_master_bus; 1852 } 1853 1854 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1855 if (ret) { 1856 pr_err("clk_get_by_name(tx) failed: %d", ret); 1857 goto err_free_clk_rx; 1858 } 1859 1860 /* Get ETH_CLK clocks (optional) */ 1861 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1862 if (ret) 1863 pr_warn("No phy clock provided %d", ret); 1864 1865 eqos->phyaddr = -1; 1866 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1867 &phandle_args); 1868 if (!ret) { 1869 /* search "reset-gpios" in phy node */ 1870 ret = gpio_request_by_name_nodev(phandle_args.node, 1871 "reset-gpios", 0, 1872 &eqos->phy_reset_gpio, 1873 GPIOD_IS_OUT | 1874 GPIOD_IS_OUT_ACTIVE); 1875 if (ret) 1876 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1877 ret); 1878 else 1879 eqos->reset_delays[1] = 2; 1880 1881 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1882 "reg", -1); 1883 } 1884 1885 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1886 int reset_flags = GPIOD_IS_OUT; 1887 1888 if (dev_read_bool(dev, "snps,reset-active-low")) 1889 reset_flags |= GPIOD_ACTIVE_LOW; 1890 1891 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1892 &eqos->phy_reset_gpio, reset_flags); 1893 if (ret == 0) 1894 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1895 eqos->reset_delays, 3); 1896 else 1897 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1898 
static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->ops->eqos_get_interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clocks (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No phy clock provided %d", ret);

	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(phy reset) not provided %d",
				ret);
		else
			eqos->reset_delays[1] = 2;

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		int reset_flags = GPIOD_IS_OUT;

		if (dev_read_bool(dev, "snps,reset-active-low"))
			reset_flags |= GPIOD_ACTIVE_LOW;

		ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
					   &eqos->phy_reset_gpio, reset_flags);
		if (ret == 0)
			ret = dev_read_u32_array(dev, "snps,reset-delays-us",
						 eqos->reset_delays, 3);
		else
			pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d",
				ret);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->ops->eqos_get_interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

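/*
 * Driver model entry points. eqos_probe() maps the register regions, brings
 * up the core and configuration-specific resources, and registers (or looks
 * up) the MDIO bus used to talk to the PHY; eqos_remove() undoes those steps
 * in reverse order.
 */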
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = dev_read_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

	/* Reuse an MDIO bus provided by the PHY uclass if there is one,
	 * otherwise allocate and register our own.
	 */
#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources_tegra;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources_tegra:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};

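/*
 * Per-configuration operation tables and constants. The compatible strings
 * in eqos_ids[] select one of the eqos_*_config structures below through the
 * .data field, and each config in turn points at the eqos_ops implementation
 * for that SoC (cache maintenance, clocks, resets, pad calibration, etc.).
 */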
static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186,
	.eqos_get_interface = eqos_get_interface_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32,
	.eqos_get_interface = eqos_get_interface_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx,
	.eqos_get_interface = eqos_get_interface_imx
};

struct eqos_config eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.ops = &eqos_imx_ops
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "snps,dwmac-4.20a",
		.data = (ulong)&eqos_stm32_config
	},
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},

	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};