1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 #ifdef CONFIG_ARCH_IMX8M 45 #include <asm/arch/clock.h> 46 #include <asm/mach-imx/sys_proto.h> 47 #endif 48 49 /* Core registers */ 50 51 #define EQOS_MAC_REGS_BASE 0x000 52 struct eqos_mac_regs { 53 uint32_t configuration; /* 0x000 */ 54 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 55 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 56 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 57 uint32_t rx_flow_ctrl; /* 0x090 */ 58 uint32_t unused_094; /* 0x094 */ 59 uint32_t txq_prty_map0; /* 0x098 */ 60 uint32_t unused_09c; /* 0x09c */ 61 uint32_t rxq_ctrl0; /* 0x0a0 */ 62 uint32_t unused_0a4; /* 0x0a4 */ 63 uint32_t rxq_ctrl2; /* 0x0a8 */ 64 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 65 uint32_t us_tic_counter; /* 0x0dc */ 66 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 67 uint32_t hw_feature0; /* 0x11c */ 68 uint32_t hw_feature1; /* 0x120 */ 69 uint32_t hw_feature2; /* 0x124 */ 70 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 71 uint32_t mdio_address; /* 0x200 */ 72 uint32_t mdio_data; /* 0x204 */ 73 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 74 uint32_t address0_high; /* 0x300 */ 75 uint32_t address0_low; /* 0x304 */ 76 }; 77 78 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 79 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 80 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 81 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 82 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 83 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 84 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 85 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 86 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 87 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 88 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 89 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 90 91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 94 95 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 96 97 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 99 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 102 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 103 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 104 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 105 106 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 107 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 108 109 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 110 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 111 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 112 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 113 114 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 115 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 116 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 117 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 118 119 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 120 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 121 122 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 123 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 124 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 125 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 126 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 127 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 128 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 129 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 130 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 131 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 132 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 133 134 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 135 136 #define EQOS_MTL_REGS_BASE 0xd00 137 struct eqos_mtl_regs { 138 uint32_t txq0_operation_mode; /* 0xd00 */ 139 uint32_t unused_d04; /* 0xd04 */ 140 uint32_t txq0_debug; /* 0xd08 */ 141 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 142 uint32_t txq0_quantum_weight; /* 0xd18 */ 143 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 144 uint32_t rxq0_operation_mode; /* 0xd30 */ 145 uint32_t unused_d34; /* 0xd34 */ 146 uint32_t rxq0_debug; /* 0xd38 */ 147 }; 148 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 152 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 153 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 154 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 155 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 156 157 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 158 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 159 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 160 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 167 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 168 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 169 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 170 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 171 172 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 173 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 174 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 175 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 176 177 #define EQOS_DMA_REGS_BASE 0x1000 178 struct eqos_dma_regs { 179 uint32_t mode; /* 0x1000 */ 180 uint32_t sysbus_mode; /* 0x1004 */ 181 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 182 uint32_t ch0_control; /* 0x1100 */ 183 uint32_t ch0_tx_control; /* 0x1104 */ 184 uint32_t ch0_rx_control; /* 0x1108 */ 185 uint32_t unused_110c; /* 0x110c */ 186 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 187 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 188 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 189 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 190 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 191 uint32_t unused_1124; /* 0x1124 */ 192 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 193 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 194 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 195 }; 196 197 #define EQOS_DMA_MODE_SWR BIT(0) 198 199 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 200 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 201 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 202 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 203 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 204 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 205 206 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 207 208 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 209 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 210 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 211 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 212 213 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 214 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 215 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 216 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 217 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 218 219 /* These registers are Tegra186-specific */ 220 #define EQOS_TEGRA186_REGS_BASE 0x8800 221 struct eqos_tegra186_regs { 222 uint32_t sdmemcomppadctrl; /* 0x8800 */ 223 uint32_t auto_cal_config; /* 0x8804 */ 224 uint32_t unused_8808; /* 0x8808 */ 225 uint32_t auto_cal_status; /* 0x880c */ 226 }; 227 228 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 229 230 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 231 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 232 233 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 234 235 /* Descriptors */ 236 237 #define EQOS_DESCRIPTOR_WORDS 4 238 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 239 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 240 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 241 #define EQOS_DESCRIPTORS_TX 4 242 #define EQOS_DESCRIPTORS_RX 4 243 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 244 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 245 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 246 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 247 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 248 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 249 250 /* 251 * Warn if the cache-line size is larger than the descriptor size. In such 252 * cases the driver will likely fail because the CPU needs to flush the cache 253 * when requeuing RX buffers, therefore descriptors written by the hardware 254 * may be discarded. Architectures with full IO coherence, such as x86, do not 255 * experience this issue, and hence are excluded from this condition. 256 * 257 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 258 * the driver to allocate descriptors from a pool of non-cached memory. 259 */ 260 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 261 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 262 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 263 #warning Cache line size is larger than descriptor size 264 #endif 265 #endif 266 267 struct eqos_desc { 268 u32 des0; 269 u32 des1; 270 u32 des2; 271 u32 des3; 272 }; 273 274 #define EQOS_DESC3_OWN BIT(31) 275 #define EQOS_DESC3_FD BIT(29) 276 #define EQOS_DESC3_LD BIT(28) 277 #define EQOS_DESC3_BUF1V BIT(24) 278 279 struct eqos_config { 280 bool reg_access_always_ok; 281 int mdio_wait; 282 int swr_wait; 283 int config_mac; 284 int config_mac_mdio; 285 struct eqos_ops *ops; 286 }; 287 288 struct eqos_ops { 289 void (*eqos_inval_desc)(void *desc); 290 void (*eqos_flush_desc)(void *desc); 291 void (*eqos_inval_buffer)(void *buf, size_t size); 292 void (*eqos_flush_buffer)(void *buf, size_t size); 293 int (*eqos_probe_resources)(struct udevice *dev); 294 int (*eqos_remove_resources)(struct udevice *dev); 295 int (*eqos_stop_resets)(struct udevice *dev); 296 int (*eqos_start_resets)(struct udevice *dev); 297 void (*eqos_stop_clks)(struct udevice *dev); 298 int (*eqos_start_clks)(struct udevice *dev); 299 int (*eqos_calibrate_pads)(struct udevice *dev); 300 int (*eqos_disable_calibration)(struct udevice *dev); 301 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 302 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 303 phy_interface_t (*eqos_get_interface)(struct udevice *dev); 304 }; 305 306 struct eqos_priv { 307 struct udevice *dev; 308 const struct eqos_config *config; 309 fdt_addr_t regs; 310 struct eqos_mac_regs *mac_regs; 311 struct eqos_mtl_regs *mtl_regs; 312 struct eqos_dma_regs *dma_regs; 313 struct eqos_tegra186_regs *tegra186_regs; 314 struct reset_ctl reset_ctl; 315 struct gpio_desc phy_reset_gpio; 316 u32 reset_delays[3]; 317 struct clk clk_master_bus; 318 struct clk clk_rx; 319 struct clk clk_ptp_ref; 320 struct clk clk_tx; 321 struct clk clk_ck; 322 struct clk clk_slave_bus; 323 struct mii_dev *mii; 324 struct phy_device *phy; 325 int phyaddr; 326 u32 max_speed; 327 void *descs; 328 struct eqos_desc *tx_descs; 329 struct eqos_desc *rx_descs; 330 int tx_desc_idx, rx_desc_idx; 331 void *tx_dma_buf; 332 void *rx_dma_buf; 333 void *rx_pkt; 334 bool started; 335 bool reg_access_ok; 336 }; 337 338 /* 339 * TX and RX descriptors are 16 bytes. This causes problems with the cache 340 * maintenance on CPUs where the cache-line size exceeds the size of these 341 * descriptors. What will happen is that when the driver receives a packet 342 * it will be immediately requeued for the hardware to reuse. The CPU will 343 * therefore need to flush the cache-line containing the descriptor, which 344 * will cause all other descriptors in the same cache-line to be flushed 345 * along with it. If one of those descriptors had been written to by the 346 * device those changes (and the associated packet) will be lost. 347 * 348 * To work around this, we make use of non-cached memory if available. If 349 * descriptors are mapped uncached there's no need to manually flush them 350 * or invalidate them. 351 * 352 * Note that this only applies to descriptors. The packet data buffers do 353 * not have the same constraints since they are 1536 bytes large, so they 354 * are unlikely to share cache-lines. 355 */ 356 static void *eqos_alloc_descs(unsigned int num) 357 { 358 #ifdef CONFIG_SYS_NONCACHED_MEMORY 359 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 360 EQOS_DESCRIPTOR_ALIGN); 361 #else 362 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 363 #endif 364 } 365 366 static void eqos_free_descs(void *descs) 367 { 368 #ifdef CONFIG_SYS_NONCACHED_MEMORY 369 /* FIXME: noncached_alloc() has no opposite */ 370 #else 371 free(descs); 372 #endif 373 } 374 375 static void eqos_inval_desc_tegra186(void *desc) 376 { 377 #ifndef CONFIG_SYS_NONCACHED_MEMORY 378 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 379 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 380 ARCH_DMA_MINALIGN); 381 382 invalidate_dcache_range(start, end); 383 #endif 384 } 385 386 static void eqos_inval_desc_generic(void *desc) 387 { 388 #ifndef CONFIG_SYS_NONCACHED_MEMORY 389 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 390 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 391 ARCH_DMA_MINALIGN); 392 393 invalidate_dcache_range(start, end); 394 #endif 395 } 396 397 static void eqos_flush_desc_tegra186(void *desc) 398 { 399 #ifndef CONFIG_SYS_NONCACHED_MEMORY 400 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 401 #endif 402 } 403 404 static void eqos_flush_desc_generic(void *desc) 405 { 406 #ifndef CONFIG_SYS_NONCACHED_MEMORY 407 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 408 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 409 ARCH_DMA_MINALIGN); 410 411 flush_dcache_range(start, end); 412 #endif 413 } 414 415 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 416 { 417 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 418 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 419 420 invalidate_dcache_range(start, end); 421 } 422 423 static void eqos_inval_buffer_generic(void *buf, size_t size) 424 { 425 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 426 unsigned long end = roundup((unsigned long)buf + size, 427 ARCH_DMA_MINALIGN); 428 429 invalidate_dcache_range(start, end); 430 } 431 432 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 433 { 434 flush_cache((unsigned long)buf, size); 435 } 436 437 static void eqos_flush_buffer_generic(void *buf, size_t size) 438 { 439 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 440 unsigned long end = roundup((unsigned long)buf + size, 441 ARCH_DMA_MINALIGN); 442 443 flush_dcache_range(start, end); 444 } 445 446 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 447 { 448 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 449 EQOS_MAC_MDIO_ADDRESS_GB, false, 450 1000000, true); 451 } 452 453 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 454 int mdio_reg) 455 { 456 struct eqos_priv *eqos = bus->priv; 457 u32 val; 458 int ret; 459 460 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 461 mdio_reg); 462 463 ret = eqos_mdio_wait_idle(eqos); 464 if (ret) { 465 pr_err("MDIO not idle at entry"); 466 return ret; 467 } 468 469 val = readl(&eqos->mac_regs->mdio_address); 470 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 471 EQOS_MAC_MDIO_ADDRESS_C45E; 472 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 473 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 474 (eqos->config->config_mac_mdio << 475 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 476 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 477 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 478 EQOS_MAC_MDIO_ADDRESS_GB; 479 writel(val, &eqos->mac_regs->mdio_address); 480 481 udelay(eqos->config->mdio_wait); 482 483 ret = eqos_mdio_wait_idle(eqos); 484 if (ret) { 485 pr_err("MDIO read didn't complete"); 486 return ret; 487 } 488 489 val = readl(&eqos->mac_regs->mdio_data); 490 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 491 492 debug("%s: val=%x\n", __func__, val); 493 494 return val; 495 } 496 497 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 498 int mdio_reg, u16 mdio_val) 499 { 500 struct eqos_priv *eqos = bus->priv; 501 u32 val; 502 int ret; 503 504 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 505 mdio_addr, mdio_reg, mdio_val); 506 507 ret = eqos_mdio_wait_idle(eqos); 508 if (ret) { 509 pr_err("MDIO not idle at entry"); 510 return ret; 511 } 512 513 writel(mdio_val, &eqos->mac_regs->mdio_data); 514 515 val = readl(&eqos->mac_regs->mdio_address); 516 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 517 EQOS_MAC_MDIO_ADDRESS_C45E; 518 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 519 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 520 (eqos->config->config_mac_mdio << 521 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 522 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 523 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 524 EQOS_MAC_MDIO_ADDRESS_GB; 525 writel(val, &eqos->mac_regs->mdio_address); 526 527 udelay(eqos->config->mdio_wait); 528 529 ret = eqos_mdio_wait_idle(eqos); 530 if (ret) { 531 pr_err("MDIO read didn't complete"); 532 return ret; 533 } 534 535 return 0; 536 } 537 538 static int eqos_start_clks_tegra186(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_slave_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 549 goto err; 550 } 551 552 ret = clk_enable(&eqos->clk_master_bus); 553 if (ret < 0) { 554 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 555 goto err_disable_clk_slave_bus; 556 } 557 558 ret = clk_enable(&eqos->clk_rx); 559 if (ret < 0) { 560 pr_err("clk_enable(clk_rx) failed: %d", ret); 561 goto err_disable_clk_master_bus; 562 } 563 564 ret = clk_enable(&eqos->clk_ptp_ref); 565 if (ret < 0) { 566 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 567 goto err_disable_clk_rx; 568 } 569 570 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 571 if (ret < 0) { 572 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 573 goto err_disable_clk_ptp_ref; 574 } 575 576 ret = clk_enable(&eqos->clk_tx); 577 if (ret < 0) { 578 pr_err("clk_enable(clk_tx) failed: %d", ret); 579 goto err_disable_clk_ptp_ref; 580 } 581 #endif 582 583 debug("%s: OK\n", __func__); 584 return 0; 585 586 #ifdef CONFIG_CLK 587 err_disable_clk_ptp_ref: 588 clk_disable(&eqos->clk_ptp_ref); 589 err_disable_clk_rx: 590 clk_disable(&eqos->clk_rx); 591 err_disable_clk_master_bus: 592 clk_disable(&eqos->clk_master_bus); 593 err_disable_clk_slave_bus: 594 clk_disable(&eqos->clk_slave_bus); 595 err: 596 debug("%s: FAILED: %d\n", __func__, ret); 597 return ret; 598 #endif 599 } 600 601 static int eqos_start_clks_stm32(struct udevice *dev) 602 { 603 #ifdef CONFIG_CLK 604 struct eqos_priv *eqos = dev_get_priv(dev); 605 int ret; 606 607 debug("%s(dev=%p):\n", __func__, dev); 608 609 ret = clk_enable(&eqos->clk_master_bus); 610 if (ret < 0) { 611 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 612 goto err; 613 } 614 615 if (clk_valid(&eqos->clk_rx)) { 616 ret = clk_enable(&eqos->clk_rx); 617 if (ret < 0) { 618 pr_err("clk_enable(clk_rx) failed: %d", ret); 619 goto err_disable_clk_master_bus; 620 } 621 } 622 623 if (clk_valid(&eqos->clk_tx)) { 624 ret = clk_enable(&eqos->clk_tx); 625 if (ret < 0) { 626 pr_err("clk_enable(clk_tx) failed: %d", ret); 627 goto err_disable_clk_rx; 628 } 629 } 630 631 if (clk_valid(&eqos->clk_ck)) { 632 ret = clk_enable(&eqos->clk_ck); 633 if (ret < 0) { 634 pr_err("clk_enable(clk_ck) failed: %d", ret); 635 goto err_disable_clk_tx; 636 } 637 } 638 #endif 639 640 debug("%s: OK\n", __func__); 641 return 0; 642 643 #ifdef CONFIG_CLK 644 err_disable_clk_tx: 645 if (clk_valid(&eqos->clk_tx)) 646 clk_disable(&eqos->clk_tx); 647 err_disable_clk_rx: 648 if (clk_valid(&eqos->clk_rx)) 649 clk_disable(&eqos->clk_rx); 650 err_disable_clk_master_bus: 651 clk_disable(&eqos->clk_master_bus); 652 err: 653 debug("%s: FAILED: %d\n", __func__, ret); 654 return ret; 655 #endif 656 } 657 658 static int eqos_start_clks_imx(struct udevice *dev) 659 { 660 return 0; 661 } 662 663 static void eqos_stop_clks_tegra186(struct udevice *dev) 664 { 665 #ifdef CONFIG_CLK 666 struct eqos_priv *eqos = dev_get_priv(dev); 667 668 debug("%s(dev=%p):\n", __func__, dev); 669 670 clk_disable(&eqos->clk_tx); 671 clk_disable(&eqos->clk_ptp_ref); 672 clk_disable(&eqos->clk_rx); 673 clk_disable(&eqos->clk_master_bus); 674 clk_disable(&eqos->clk_slave_bus); 675 #endif 676 677 debug("%s: OK\n", __func__); 678 } 679 680 static void eqos_stop_clks_stm32(struct udevice *dev) 681 { 682 #ifdef CONFIG_CLK 683 struct eqos_priv *eqos = dev_get_priv(dev); 684 685 debug("%s(dev=%p):\n", __func__, dev); 686 687 if (clk_valid(&eqos->clk_tx)) 688 clk_disable(&eqos->clk_tx); 689 if (clk_valid(&eqos->clk_rx)) 690 clk_disable(&eqos->clk_rx); 691 clk_disable(&eqos->clk_master_bus); 692 if (clk_valid(&eqos->clk_ck)) 693 clk_disable(&eqos->clk_ck); 694 #endif 695 696 debug("%s: OK\n", __func__); 697 } 698 699 static void eqos_stop_clks_imx(struct udevice *dev) 700 { 701 /* empty */ 702 } 703 704 static int eqos_start_resets_tegra186(struct udevice *dev) 705 { 706 struct eqos_priv *eqos = dev_get_priv(dev); 707 int ret; 708 709 debug("%s(dev=%p):\n", __func__, dev); 710 711 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 712 if (ret < 0) { 713 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 714 return ret; 715 } 716 717 udelay(2); 718 719 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 720 if (ret < 0) { 721 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 722 return ret; 723 } 724 725 ret = reset_assert(&eqos->reset_ctl); 726 if (ret < 0) { 727 pr_err("reset_assert() failed: %d", ret); 728 return ret; 729 } 730 731 udelay(2); 732 733 ret = reset_deassert(&eqos->reset_ctl); 734 if (ret < 0) { 735 pr_err("reset_deassert() failed: %d", ret); 736 return ret; 737 } 738 739 debug("%s: OK\n", __func__); 740 return 0; 741 } 742 743 static int eqos_start_resets_stm32(struct udevice *dev) 744 { 745 struct eqos_priv *eqos = dev_get_priv(dev); 746 int ret; 747 748 debug("%s(dev=%p):\n", __func__, dev); 749 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 750 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 751 if (ret < 0) { 752 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 753 ret); 754 return ret; 755 } 756 757 udelay(eqos->reset_delays[0]); 758 759 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 760 if (ret < 0) { 761 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 762 ret); 763 return ret; 764 } 765 766 udelay(eqos->reset_delays[1]); 767 768 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 769 if (ret < 0) { 770 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 771 ret); 772 return ret; 773 } 774 775 udelay(eqos->reset_delays[2]); 776 } 777 debug("%s: OK\n", __func__); 778 779 return 0; 780 } 781 782 static int eqos_start_resets_imx(struct udevice *dev) 783 { 784 return 0; 785 } 786 787 static int eqos_stop_resets_tegra186(struct udevice *dev) 788 { 789 struct eqos_priv *eqos = dev_get_priv(dev); 790 791 reset_assert(&eqos->reset_ctl); 792 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 793 794 return 0; 795 } 796 797 static int eqos_stop_resets_stm32(struct udevice *dev) 798 { 799 struct eqos_priv *eqos = dev_get_priv(dev); 800 int ret; 801 802 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 803 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 804 if (ret < 0) { 805 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 806 ret); 807 return ret; 808 } 809 } 810 811 return 0; 812 } 813 814 static int eqos_stop_resets_imx(struct udevice *dev) 815 { 816 return 0; 817 } 818 819 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 820 { 821 struct eqos_priv *eqos = dev_get_priv(dev); 822 int ret; 823 824 debug("%s(dev=%p):\n", __func__, dev); 825 826 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 827 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 828 829 udelay(1); 830 831 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 832 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 833 834 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 835 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 836 if (ret) { 837 pr_err("calibrate didn't start"); 838 goto failed; 839 } 840 841 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 842 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 843 if (ret) { 844 pr_err("calibrate didn't finish"); 845 goto failed; 846 } 847 848 ret = 0; 849 850 failed: 851 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 852 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 853 854 debug("%s: returns %d\n", __func__, ret); 855 856 return ret; 857 } 858 859 static int eqos_disable_calibration_tegra186(struct udevice *dev) 860 { 861 struct eqos_priv *eqos = dev_get_priv(dev); 862 863 debug("%s(dev=%p):\n", __func__, dev); 864 865 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 866 EQOS_AUTO_CAL_CONFIG_ENABLE); 867 868 return 0; 869 } 870 871 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 872 { 873 #ifdef CONFIG_CLK 874 struct eqos_priv *eqos = dev_get_priv(dev); 875 876 return clk_get_rate(&eqos->clk_slave_bus); 877 #else 878 return 0; 879 #endif 880 } 881 882 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 883 { 884 #ifdef CONFIG_CLK 885 struct eqos_priv *eqos = dev_get_priv(dev); 886 887 return clk_get_rate(&eqos->clk_master_bus); 888 #else 889 return 0; 890 #endif 891 } 892 893 __weak u32 imx_get_eqos_csr_clk(void) 894 { 895 return 100 * 1000000; 896 } 897 __weak int imx_eqos_txclk_set_rate(unsigned long rate) 898 { 899 return 0; 900 } 901 902 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 903 { 904 return imx_get_eqos_csr_clk(); 905 } 906 907 static int eqos_calibrate_pads_stm32(struct udevice *dev) 908 { 909 return 0; 910 } 911 912 static int eqos_calibrate_pads_imx(struct udevice *dev) 913 { 914 return 0; 915 } 916 917 static int eqos_disable_calibration_stm32(struct udevice *dev) 918 { 919 return 0; 920 } 921 922 static int eqos_disable_calibration_imx(struct udevice *dev) 923 { 924 return 0; 925 } 926 927 static int eqos_set_full_duplex(struct udevice *dev) 928 { 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 931 debug("%s(dev=%p):\n", __func__, dev); 932 933 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 934 935 return 0; 936 } 937 938 static int eqos_set_half_duplex(struct udevice *dev) 939 { 940 struct eqos_priv *eqos = dev_get_priv(dev); 941 942 debug("%s(dev=%p):\n", __func__, dev); 943 944 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 945 946 /* WAR: Flush TX queue when switching to half-duplex */ 947 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 948 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 949 950 return 0; 951 } 952 953 static int eqos_set_gmii_speed(struct udevice *dev) 954 { 955 struct eqos_priv *eqos = dev_get_priv(dev); 956 957 debug("%s(dev=%p):\n", __func__, dev); 958 959 clrbits_le32(&eqos->mac_regs->configuration, 960 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 961 962 return 0; 963 } 964 965 static int eqos_set_mii_speed_100(struct udevice *dev) 966 { 967 struct eqos_priv *eqos = dev_get_priv(dev); 968 969 debug("%s(dev=%p):\n", __func__, dev); 970 971 setbits_le32(&eqos->mac_regs->configuration, 972 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 973 974 return 0; 975 } 976 977 static int eqos_set_mii_speed_10(struct udevice *dev) 978 { 979 struct eqos_priv *eqos = dev_get_priv(dev); 980 981 debug("%s(dev=%p):\n", __func__, dev); 982 983 clrsetbits_le32(&eqos->mac_regs->configuration, 984 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 985 986 return 0; 987 } 988 989 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 990 { 991 #ifdef CONFIG_CLK 992 struct eqos_priv *eqos = dev_get_priv(dev); 993 ulong rate; 994 int ret; 995 996 debug("%s(dev=%p):\n", __func__, dev); 997 998 switch (eqos->phy->speed) { 999 case SPEED_1000: 1000 rate = 125 * 1000 * 1000; 1001 break; 1002 case SPEED_100: 1003 rate = 25 * 1000 * 1000; 1004 break; 1005 case SPEED_10: 1006 rate = 2.5 * 1000 * 1000; 1007 break; 1008 default: 1009 pr_err("invalid speed %d", eqos->phy->speed); 1010 return -EINVAL; 1011 } 1012 1013 ret = clk_set_rate(&eqos->clk_tx, rate); 1014 if (ret < 0) { 1015 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 1016 return ret; 1017 } 1018 #endif 1019 1020 return 0; 1021 } 1022 1023 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 1024 { 1025 return 0; 1026 } 1027 1028 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 1029 { 1030 struct eqos_priv *eqos = dev_get_priv(dev); 1031 ulong rate; 1032 int ret; 1033 1034 debug("%s(dev=%p):\n", __func__, dev); 1035 1036 switch (eqos->phy->speed) { 1037 case SPEED_1000: 1038 rate = 125 * 1000 * 1000; 1039 break; 1040 case SPEED_100: 1041 rate = 25 * 1000 * 1000; 1042 break; 1043 case SPEED_10: 1044 rate = 2.5 * 1000 * 1000; 1045 break; 1046 default: 1047 pr_err("invalid speed %d", eqos->phy->speed); 1048 return -EINVAL; 1049 } 1050 1051 ret = imx_eqos_txclk_set_rate(rate); 1052 if (ret < 0) { 1053 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 1054 return ret; 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int eqos_adjust_link(struct udevice *dev) 1061 { 1062 struct eqos_priv *eqos = dev_get_priv(dev); 1063 int ret; 1064 bool en_calibration; 1065 1066 debug("%s(dev=%p):\n", __func__, dev); 1067 1068 if (eqos->phy->duplex) 1069 ret = eqos_set_full_duplex(dev); 1070 else 1071 ret = eqos_set_half_duplex(dev); 1072 if (ret < 0) { 1073 pr_err("eqos_set_*_duplex() failed: %d", ret); 1074 return ret; 1075 } 1076 1077 switch (eqos->phy->speed) { 1078 case SPEED_1000: 1079 en_calibration = true; 1080 ret = eqos_set_gmii_speed(dev); 1081 break; 1082 case SPEED_100: 1083 en_calibration = true; 1084 ret = eqos_set_mii_speed_100(dev); 1085 break; 1086 case SPEED_10: 1087 en_calibration = false; 1088 ret = eqos_set_mii_speed_10(dev); 1089 break; 1090 default: 1091 pr_err("invalid speed %d", eqos->phy->speed); 1092 return -EINVAL; 1093 } 1094 if (ret < 0) { 1095 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1096 return ret; 1097 } 1098 1099 if (en_calibration) { 1100 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1101 if (ret < 0) { 1102 pr_err("eqos_calibrate_pads() failed: %d", 1103 ret); 1104 return ret; 1105 } 1106 } else { 1107 ret = eqos->config->ops->eqos_disable_calibration(dev); 1108 if (ret < 0) { 1109 pr_err("eqos_disable_calibration() failed: %d", 1110 ret); 1111 return ret; 1112 } 1113 } 1114 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1115 if (ret < 0) { 1116 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1117 return ret; 1118 } 1119 1120 return 0; 1121 } 1122 1123 static int eqos_write_hwaddr(struct udevice *dev) 1124 { 1125 struct eth_pdata *plat = dev_get_platdata(dev); 1126 struct eqos_priv *eqos = dev_get_priv(dev); 1127 uint32_t val; 1128 1129 /* 1130 * This function may be called before start() or after stop(). At that 1131 * time, on at least some configurations of the EQoS HW, all clocks to 1132 * the EQoS HW block will be stopped, and a reset signal applied. If 1133 * any register access is attempted in this state, bus timeouts or CPU 1134 * hangs may occur. This check prevents that. 1135 * 1136 * A simple solution to this problem would be to not implement 1137 * write_hwaddr(), since start() always writes the MAC address into HW 1138 * anyway. However, it is desirable to implement write_hwaddr() to 1139 * support the case of SW that runs subsequent to U-Boot which expects 1140 * the MAC address to already be programmed into the EQoS registers, 1141 * which must happen irrespective of whether the U-Boot user (or 1142 * scripts) actually made use of the EQoS device, and hence 1143 * irrespective of whether start() was ever called. 1144 * 1145 * Note that this requirement by subsequent SW is not valid for 1146 * Tegra186, and is likely not valid for any non-PCI instantiation of 1147 * the EQoS HW block. This function is implemented solely as 1148 * future-proofing with the expectation the driver will eventually be 1149 * ported to some system where the expectation above is true. 1150 */ 1151 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1152 return 0; 1153 1154 /* Update the MAC address */ 1155 val = (plat->enetaddr[5] << 8) | 1156 (plat->enetaddr[4]); 1157 writel(val, &eqos->mac_regs->address0_high); 1158 val = (plat->enetaddr[3] << 24) | 1159 (plat->enetaddr[2] << 16) | 1160 (plat->enetaddr[1] << 8) | 1161 (plat->enetaddr[0]); 1162 writel(val, &eqos->mac_regs->address0_low); 1163 1164 return 0; 1165 } 1166 1167 static int eqos_read_rom_hwaddr(struct udevice *dev) 1168 { 1169 struct eth_pdata *pdata = dev_get_platdata(dev); 1170 1171 #ifdef CONFIG_ARCH_IMX8M 1172 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1173 #endif 1174 return !is_valid_ethaddr(pdata->enetaddr); 1175 } 1176 1177 static int eqos_start(struct udevice *dev) 1178 { 1179 struct eqos_priv *eqos = dev_get_priv(dev); 1180 int ret, i; 1181 ulong rate; 1182 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1183 ulong last_rx_desc; 1184 1185 debug("%s(dev=%p):\n", __func__, dev); 1186 1187 eqos->tx_desc_idx = 0; 1188 eqos->rx_desc_idx = 0; 1189 1190 ret = eqos->config->ops->eqos_start_clks(dev); 1191 if (ret < 0) { 1192 pr_err("eqos_start_clks() failed: %d", ret); 1193 goto err; 1194 } 1195 1196 ret = eqos->config->ops->eqos_start_resets(dev); 1197 if (ret < 0) { 1198 pr_err("eqos_start_resets() failed: %d", ret); 1199 goto err_stop_clks; 1200 } 1201 1202 udelay(10); 1203 1204 eqos->reg_access_ok = true; 1205 1206 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1207 EQOS_DMA_MODE_SWR, false, 1208 eqos->config->swr_wait, false); 1209 if (ret) { 1210 pr_err("EQOS_DMA_MODE_SWR stuck"); 1211 goto err_stop_resets; 1212 } 1213 1214 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1215 if (ret < 0) { 1216 pr_err("eqos_calibrate_pads() failed: %d", ret); 1217 goto err_stop_resets; 1218 } 1219 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1220 1221 val = (rate / 1000000) - 1; 1222 writel(val, &eqos->mac_regs->us_tic_counter); 1223 1224 /* 1225 * if PHY was already connected and configured, 1226 * don't need to reconnect/reconfigure again 1227 */ 1228 if (!eqos->phy) { 1229 int addr = -1; 1230 #ifdef CONFIG_DM_ETH_PHY 1231 addr = eth_phy_get_addr(dev); 1232 #endif 1233 #ifdef DWC_NET_PHYADDR 1234 addr = DWC_NET_PHYADDR; 1235 #endif 1236 eqos->phy = phy_connect(eqos->mii, addr, dev, 1237 eqos->config->ops->eqos_get_interface(dev)); 1238 if (!eqos->phy) { 1239 pr_err("phy_connect() failed"); 1240 goto err_stop_resets; 1241 } 1242 1243 if (eqos->max_speed) { 1244 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1245 if (ret) { 1246 pr_err("phy_set_supported() failed: %d", ret); 1247 goto err_shutdown_phy; 1248 } 1249 } 1250 1251 ret = phy_config(eqos->phy); 1252 if (ret < 0) { 1253 pr_err("phy_config() failed: %d", ret); 1254 goto err_shutdown_phy; 1255 } 1256 } 1257 1258 ret = phy_startup(eqos->phy); 1259 if (ret < 0) { 1260 pr_err("phy_startup() failed: %d", ret); 1261 goto err_shutdown_phy; 1262 } 1263 1264 if (!eqos->phy->link) { 1265 pr_err("No link"); 1266 goto err_shutdown_phy; 1267 } 1268 1269 ret = eqos_adjust_link(dev); 1270 if (ret < 0) { 1271 pr_err("eqos_adjust_link() failed: %d", ret); 1272 goto err_shutdown_phy; 1273 } 1274 1275 /* Configure MTL */ 1276 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1277 1278 /* Enable Store and Forward mode for TX */ 1279 /* Program Tx operating mode */ 1280 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1281 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1282 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1283 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1284 1285 /* Transmit Queue weight */ 1286 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1287 1288 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1289 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1290 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1291 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1292 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1293 1294 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1295 val = readl(&eqos->mac_regs->hw_feature1); 1296 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1297 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1298 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1299 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1300 1301 /* 1302 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1303 * r/tqs is encoded as (n / 256) - 1. 1304 */ 1305 tqs = (128 << tx_fifo_sz) / 256 - 1; 1306 rqs = (128 << rx_fifo_sz) / 256 - 1; 1307 1308 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1309 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1310 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1311 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1312 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1313 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1314 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1315 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1316 1317 /* Flow control used only if each channel gets 4KB or more FIFO */ 1318 if (rqs >= ((4096 / 256) - 1)) { 1319 u32 rfd, rfa; 1320 1321 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1322 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1323 1324 /* 1325 * Set Threshold for Activating Flow Contol space for min 2 1326 * frames ie, (1500 * 1) = 1500 bytes. 1327 * 1328 * Set Threshold for Deactivating Flow Contol for space of 1329 * min 1 frame (frame size 1500bytes) in receive fifo 1330 */ 1331 if (rqs == ((4096 / 256) - 1)) { 1332 /* 1333 * This violates the above formula because of FIFO size 1334 * limit therefore overflow may occur inspite of this. 1335 */ 1336 rfd = 0x3; /* Full-3K */ 1337 rfa = 0x1; /* Full-1.5K */ 1338 } else if (rqs == ((8192 / 256) - 1)) { 1339 rfd = 0x6; /* Full-4K */ 1340 rfa = 0xa; /* Full-6K */ 1341 } else if (rqs == ((16384 / 256) - 1)) { 1342 rfd = 0x6; /* Full-4K */ 1343 rfa = 0x12; /* Full-10K */ 1344 } else { 1345 rfd = 0x6; /* Full-4K */ 1346 rfa = 0x1E; /* Full-16K */ 1347 } 1348 1349 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1350 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1351 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1352 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1353 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1354 (rfd << 1355 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1356 (rfa << 1357 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1358 } 1359 1360 /* Configure MAC */ 1361 1362 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1363 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1364 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1365 eqos->config->config_mac << 1366 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1367 1368 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1369 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1370 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1371 0x2 << 1372 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1373 1374 /* Multicast and Broadcast Queue Enable */ 1375 setbits_le32(&eqos->mac_regs->unused_0a4, 1376 0x00100000); 1377 /* enable promise mode */ 1378 setbits_le32(&eqos->mac_regs->unused_004[1], 1379 0x1); 1380 1381 /* Set TX flow control parameters */ 1382 /* Set Pause Time */ 1383 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1384 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1385 /* Assign priority for TX flow control */ 1386 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1387 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1388 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1389 /* Assign priority for RX flow control */ 1390 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1391 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1392 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1393 /* Enable flow control */ 1394 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1395 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1396 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1397 EQOS_MAC_RX_FLOW_CTRL_RFE); 1398 1399 clrsetbits_le32(&eqos->mac_regs->configuration, 1400 EQOS_MAC_CONFIGURATION_GPSLCE | 1401 EQOS_MAC_CONFIGURATION_WD | 1402 EQOS_MAC_CONFIGURATION_JD | 1403 EQOS_MAC_CONFIGURATION_JE, 1404 EQOS_MAC_CONFIGURATION_CST | 1405 EQOS_MAC_CONFIGURATION_ACS); 1406 1407 eqos_write_hwaddr(dev); 1408 1409 /* Configure DMA */ 1410 1411 /* Enable OSP mode */ 1412 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1413 EQOS_DMA_CH0_TX_CONTROL_OSP); 1414 1415 /* RX buffer size. Must be a multiple of bus width */ 1416 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1417 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1418 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1419 EQOS_MAX_PACKET_SIZE << 1420 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1421 1422 setbits_le32(&eqos->dma_regs->ch0_control, 1423 EQOS_DMA_CH0_CONTROL_PBLX8); 1424 1425 /* 1426 * Burst length must be < 1/2 FIFO size. 1427 * FIFO size in tqs is encoded as (n / 256) - 1. 1428 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1429 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 1430 */ 1431 pbl = tqs + 1; 1432 if (pbl > 32) 1433 pbl = 32; 1434 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1435 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1436 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1437 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1438 1439 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1440 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1441 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1442 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1443 1444 /* DMA performance configuration */ 1445 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1446 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1447 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1448 writel(val, &eqos->dma_regs->sysbus_mode); 1449 1450 /* Set up descriptors */ 1451 1452 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1453 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1454 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1455 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1456 (i * EQOS_MAX_PACKET_SIZE)); 1457 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1458 mb(); 1459 eqos->config->ops->eqos_flush_desc(rx_desc); 1460 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1461 (i * EQOS_MAX_PACKET_SIZE), 1462 EQOS_MAX_PACKET_SIZE); 1463 } 1464 1465 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1466 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1467 writel(EQOS_DESCRIPTORS_TX - 1, 1468 &eqos->dma_regs->ch0_txdesc_ring_length); 1469 1470 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1471 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1472 writel(EQOS_DESCRIPTORS_RX - 1, 1473 &eqos->dma_regs->ch0_rxdesc_ring_length); 1474 1475 /* Enable everything */ 1476 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1477 EQOS_DMA_CH0_TX_CONTROL_ST); 1478 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1479 EQOS_DMA_CH0_RX_CONTROL_SR); 1480 setbits_le32(&eqos->mac_regs->configuration, 1481 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1482 1483 /* TX tail pointer not written until we need to TX a packet */ 1484 /* 1485 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1486 * first descriptor, implying all descriptors were available. However, 1487 * that's not distinguishable from none of the descriptors being 1488 * available. 1489 */ 1490 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1491 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1492 1493 eqos->started = true; 1494 1495 debug("%s: OK\n", __func__); 1496 return 0; 1497 1498 err_shutdown_phy: 1499 phy_shutdown(eqos->phy); 1500 err_stop_resets: 1501 eqos->config->ops->eqos_stop_resets(dev); 1502 err_stop_clks: 1503 eqos->config->ops->eqos_stop_clks(dev); 1504 err: 1505 pr_err("FAILED: %d", ret); 1506 return ret; 1507 } 1508 1509 static void eqos_stop(struct udevice *dev) 1510 { 1511 struct eqos_priv *eqos = dev_get_priv(dev); 1512 int i; 1513 1514 debug("%s(dev=%p):\n", __func__, dev); 1515 1516 if (!eqos->started) 1517 return; 1518 eqos->started = false; 1519 eqos->reg_access_ok = false; 1520 1521 /* Disable TX DMA */ 1522 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1523 EQOS_DMA_CH0_TX_CONTROL_ST); 1524 1525 /* Wait for TX all packets to drain out of MTL */ 1526 for (i = 0; i < 1000000; i++) { 1527 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1528 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1529 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1530 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1531 if ((trcsts != 1) && (!txqsts)) 1532 break; 1533 } 1534 1535 /* Turn off MAC TX and RX */ 1536 clrbits_le32(&eqos->mac_regs->configuration, 1537 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1538 1539 /* Wait for all RX packets to drain out of MTL */ 1540 for (i = 0; i < 1000000; i++) { 1541 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1542 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1543 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1544 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1545 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1546 if ((!prxq) && (!rxqsts)) 1547 break; 1548 } 1549 1550 /* Turn off RX DMA */ 1551 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1552 EQOS_DMA_CH0_RX_CONTROL_SR); 1553 1554 if (eqos->phy) { 1555 phy_shutdown(eqos->phy); 1556 } 1557 eqos->config->ops->eqos_stop_resets(dev); 1558 eqos->config->ops->eqos_stop_clks(dev); 1559 1560 debug("%s: OK\n", __func__); 1561 } 1562 1563 static int eqos_send(struct udevice *dev, void *packet, int length) 1564 { 1565 struct eqos_priv *eqos = dev_get_priv(dev); 1566 struct eqos_desc *tx_desc; 1567 int i; 1568 1569 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1570 length); 1571 1572 memcpy(eqos->tx_dma_buf, packet, length); 1573 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1574 1575 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1576 eqos->tx_desc_idx++; 1577 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1578 1579 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1580 tx_desc->des1 = 0; 1581 tx_desc->des2 = length; 1582 /* 1583 * Make sure that if HW sees the _OWN write below, it will see all the 1584 * writes to the rest of the descriptor too. 1585 */ 1586 mb(); 1587 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1588 eqos->config->ops->eqos_flush_desc(tx_desc); 1589 1590 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1591 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1592 1593 for (i = 0; i < 1000000; i++) { 1594 eqos->config->ops->eqos_inval_desc(tx_desc); 1595 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1596 return 0; 1597 udelay(1); 1598 } 1599 1600 debug("%s: TX timeout\n", __func__); 1601 1602 return -ETIMEDOUT; 1603 } 1604 1605 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1606 { 1607 struct eqos_priv *eqos = dev_get_priv(dev); 1608 struct eqos_desc *rx_desc; 1609 int length; 1610 1611 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1612 1613 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1614 eqos->config->ops->eqos_inval_desc(rx_desc); 1615 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1616 debug("%s: RX packet not available\n", __func__); 1617 return -EAGAIN; 1618 } 1619 1620 *packetp = eqos->rx_dma_buf + 1621 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1622 length = rx_desc->des3 & 0x7fff; 1623 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1624 1625 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1626 1627 return length; 1628 } 1629 1630 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1631 { 1632 struct eqos_priv *eqos = dev_get_priv(dev); 1633 uchar *packet_expected; 1634 struct eqos_desc *rx_desc; 1635 1636 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1637 1638 packet_expected = eqos->rx_dma_buf + 1639 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1640 if (packet != packet_expected) { 1641 debug("%s: Unexpected packet (expected %p)\n", __func__, 1642 packet_expected); 1643 return -EINVAL; 1644 } 1645 1646 eqos->config->ops->eqos_inval_buffer(packet, length); 1647 1648 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1649 1650 rx_desc->des0 = 0; 1651 mb(); 1652 eqos->config->ops->eqos_flush_desc(rx_desc); 1653 eqos->config->ops->eqos_inval_buffer(packet, length); 1654 rx_desc->des0 = (u32)(ulong)packet; 1655 rx_desc->des1 = 0; 1656 rx_desc->des2 = 0; 1657 /* 1658 * Make sure that if HW sees the _OWN write below, it will see all the 1659 * writes to the rest of the descriptor too. 1660 */ 1661 mb(); 1662 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1663 eqos->config->ops->eqos_flush_desc(rx_desc); 1664 1665 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1666 1667 eqos->rx_desc_idx++; 1668 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1669 1670 return 0; 1671 } 1672 1673 static int eqos_probe_resources_core(struct udevice *dev) 1674 { 1675 struct eqos_priv *eqos = dev_get_priv(dev); 1676 int ret; 1677 1678 debug("%s(dev=%p):\n", __func__, dev); 1679 1680 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1681 EQOS_DESCRIPTORS_RX); 1682 if (!eqos->descs) { 1683 debug("%s: eqos_alloc_descs() failed\n", __func__); 1684 ret = -ENOMEM; 1685 goto err; 1686 } 1687 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1688 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1689 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1690 eqos->rx_descs); 1691 1692 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1693 if (!eqos->tx_dma_buf) { 1694 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1695 ret = -ENOMEM; 1696 goto err_free_descs; 1697 } 1698 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1699 1700 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1701 if (!eqos->rx_dma_buf) { 1702 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1703 ret = -ENOMEM; 1704 goto err_free_tx_dma_buf; 1705 } 1706 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1707 1708 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1709 if (!eqos->rx_pkt) { 1710 debug("%s: malloc(rx_pkt) failed\n", __func__); 1711 ret = -ENOMEM; 1712 goto err_free_rx_dma_buf; 1713 } 1714 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1715 1716 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1717 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1718 1719 debug("%s: OK\n", __func__); 1720 return 0; 1721 1722 err_free_rx_dma_buf: 1723 free(eqos->rx_dma_buf); 1724 err_free_tx_dma_buf: 1725 free(eqos->tx_dma_buf); 1726 err_free_descs: 1727 eqos_free_descs(eqos->descs); 1728 err: 1729 1730 debug("%s: returns %d\n", __func__, ret); 1731 return ret; 1732 } 1733 1734 static int eqos_remove_resources_core(struct udevice *dev) 1735 { 1736 struct eqos_priv *eqos = dev_get_priv(dev); 1737 1738 debug("%s(dev=%p):\n", __func__, dev); 1739 1740 free(eqos->rx_pkt); 1741 free(eqos->rx_dma_buf); 1742 free(eqos->tx_dma_buf); 1743 eqos_free_descs(eqos->descs); 1744 1745 debug("%s: OK\n", __func__); 1746 return 0; 1747 } 1748 1749 static int eqos_probe_resources_tegra186(struct udevice *dev) 1750 { 1751 struct eqos_priv *eqos = dev_get_priv(dev); 1752 int ret; 1753 1754 debug("%s(dev=%p):\n", __func__, dev); 1755 1756 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1757 if (ret) { 1758 pr_err("reset_get_by_name(rst) failed: %d", ret); 1759 return ret; 1760 } 1761 1762 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1763 &eqos->phy_reset_gpio, 1764 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1765 if (ret) { 1766 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1767 goto err_free_reset_eqos; 1768 } 1769 1770 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1771 if (ret) { 1772 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1773 goto err_free_gpio_phy_reset; 1774 } 1775 1776 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1777 if (ret) { 1778 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1779 goto err_free_clk_slave_bus; 1780 } 1781 1782 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1783 if (ret) { 1784 pr_err("clk_get_by_name(rx) failed: %d", ret); 1785 goto err_free_clk_master_bus; 1786 } 1787 1788 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1789 if (ret) { 1790 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1791 goto err_free_clk_rx; 1792 return ret; 1793 } 1794 1795 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1796 if (ret) { 1797 pr_err("clk_get_by_name(tx) failed: %d", ret); 1798 goto err_free_clk_ptp_ref; 1799 } 1800 1801 debug("%s: OK\n", __func__); 1802 return 0; 1803 1804 err_free_clk_ptp_ref: 1805 clk_free(&eqos->clk_ptp_ref); 1806 err_free_clk_rx: 1807 clk_free(&eqos->clk_rx); 1808 err_free_clk_master_bus: 1809 clk_free(&eqos->clk_master_bus); 1810 err_free_clk_slave_bus: 1811 clk_free(&eqos->clk_slave_bus); 1812 err_free_gpio_phy_reset: 1813 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1814 err_free_reset_eqos: 1815 reset_free(&eqos->reset_ctl); 1816 1817 debug("%s: returns %d\n", __func__, ret); 1818 return ret; 1819 } 1820 1821 /* board-specific Ethernet Interface initializations. */ 1822 __weak int board_interface_eth_init(struct udevice *dev, 1823 phy_interface_t interface_type) 1824 { 1825 return 0; 1826 } 1827 1828 static int eqos_probe_resources_stm32(struct udevice *dev) 1829 { 1830 struct eqos_priv *eqos = dev_get_priv(dev); 1831 int ret; 1832 phy_interface_t interface; 1833 struct ofnode_phandle_args phandle_args; 1834 1835 debug("%s(dev=%p):\n", __func__, dev); 1836 1837 interface = eqos->config->ops->eqos_get_interface(dev); 1838 1839 if (interface == PHY_INTERFACE_MODE_NONE) { 1840 pr_err("Invalid PHY interface\n"); 1841 return -EINVAL; 1842 } 1843 1844 ret = board_interface_eth_init(dev, interface); 1845 if (ret) 1846 return -EINVAL; 1847 1848 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1849 1850 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1851 if (ret) { 1852 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1853 return ret; 1854 } 1855 1856 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1857 if (ret) 1858 pr_warn("clk_get_by_name(rx) failed: %d", ret); 1859 1860 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1861 if (ret) 1862 pr_warn("clk_get_by_name(tx) failed: %d", ret); 1863 1864 /* Get ETH_CLK clocks (optional) */ 1865 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1866 if (ret) 1867 pr_warn("No phy clock provided %d", ret); 1868 1869 eqos->phyaddr = -1; 1870 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1871 &phandle_args); 1872 if (!ret) { 1873 /* search "reset-gpios" in phy node */ 1874 ret = gpio_request_by_name_nodev(phandle_args.node, 1875 "reset-gpios", 0, 1876 &eqos->phy_reset_gpio, 1877 GPIOD_IS_OUT | 1878 GPIOD_IS_OUT_ACTIVE); 1879 if (ret) 1880 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1881 ret); 1882 else 1883 eqos->reset_delays[1] = 2; 1884 1885 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1886 "reg", -1); 1887 } 1888 1889 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1890 int reset_flags = GPIOD_IS_OUT; 1891 1892 if (dev_read_bool(dev, "snps,reset-active-low")) 1893 reset_flags |= GPIOD_ACTIVE_LOW; 1894 1895 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1896 &eqos->phy_reset_gpio, reset_flags); 1897 if (ret == 0) 1898 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1899 eqos->reset_delays, 3); 1900 else 1901 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1902 ret); 1903 } 1904 1905 debug("%s: OK\n", __func__); 1906 return 0; 1907 } 1908 1909 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1910 { 1911 const char *phy_mode; 1912 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1913 1914 debug("%s(dev=%p):\n", __func__, dev); 1915 1916 phy_mode = dev_read_string(dev, "phy-mode"); 1917 if (phy_mode) 1918 interface = phy_get_interface_by_name(phy_mode); 1919 1920 return interface; 1921 } 1922 1923 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1924 { 1925 return PHY_INTERFACE_MODE_MII; 1926 } 1927 1928 static int eqos_probe_resources_imx(struct udevice *dev) 1929 { 1930 struct eqos_priv *eqos = dev_get_priv(dev); 1931 phy_interface_t interface; 1932 1933 debug("%s(dev=%p):\n", __func__, dev); 1934 1935 interface = eqos->config->ops->eqos_get_interface(dev); 1936 1937 if (interface == PHY_INTERFACE_MODE_NONE) { 1938 pr_err("Invalid PHY interface\n"); 1939 return -EINVAL; 1940 } 1941 1942 debug("%s: OK\n", __func__); 1943 return 0; 1944 } 1945 1946 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1947 { 1948 const char *phy_mode; 1949 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1950 1951 debug("%s(dev=%p):\n", __func__, dev); 1952 1953 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1954 NULL); 1955 if (phy_mode) 1956 interface = phy_get_interface_by_name(phy_mode); 1957 1958 return interface; 1959 } 1960 1961 static int eqos_remove_resources_tegra186(struct udevice *dev) 1962 { 1963 struct eqos_priv *eqos = dev_get_priv(dev); 1964 1965 debug("%s(dev=%p):\n", __func__, dev); 1966 1967 #ifdef CONFIG_CLK 1968 clk_free(&eqos->clk_tx); 1969 clk_free(&eqos->clk_ptp_ref); 1970 clk_free(&eqos->clk_rx); 1971 clk_free(&eqos->clk_slave_bus); 1972 clk_free(&eqos->clk_master_bus); 1973 #endif 1974 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1975 reset_free(&eqos->reset_ctl); 1976 1977 debug("%s: OK\n", __func__); 1978 return 0; 1979 } 1980 1981 static int eqos_remove_resources_stm32(struct udevice *dev) 1982 { 1983 #ifdef CONFIG_CLK 1984 struct eqos_priv *eqos = dev_get_priv(dev); 1985 1986 debug("%s(dev=%p):\n", __func__, dev); 1987 1988 if (clk_valid(&eqos->clk_tx)) 1989 clk_free(&eqos->clk_tx); 1990 if (clk_valid(&eqos->clk_rx)) 1991 clk_free(&eqos->clk_rx); 1992 clk_free(&eqos->clk_master_bus); 1993 if (clk_valid(&eqos->clk_ck)) 1994 clk_free(&eqos->clk_ck); 1995 #endif 1996 1997 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 1998 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1999 2000 debug("%s: OK\n", __func__); 2001 return 0; 2002 } 2003 2004 static int eqos_remove_resources_imx(struct udevice *dev) 2005 { 2006 return 0; 2007 } 2008 2009 static int eqos_probe(struct udevice *dev) 2010 { 2011 struct eqos_priv *eqos = dev_get_priv(dev); 2012 int ret; 2013 2014 debug("%s(dev=%p):\n", __func__, dev); 2015 2016 eqos->dev = dev; 2017 eqos->config = (void *)dev_get_driver_data(dev); 2018 2019 eqos->regs = dev_read_addr(dev); 2020 if (eqos->regs == FDT_ADDR_T_NONE) { 2021 pr_err("dev_read_addr() failed"); 2022 return -ENODEV; 2023 } 2024 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 2025 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 2026 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 2027 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 2028 2029 ret = eqos_probe_resources_core(dev); 2030 if (ret < 0) { 2031 pr_err("eqos_probe_resources_core() failed: %d", ret); 2032 return ret; 2033 } 2034 2035 ret = eqos->config->ops->eqos_probe_resources(dev); 2036 if (ret < 0) { 2037 pr_err("eqos_probe_resources() failed: %d", ret); 2038 goto err_remove_resources_core; 2039 } 2040 2041 #ifdef CONFIG_DM_ETH_PHY 2042 eqos->mii = eth_phy_get_mdio_bus(dev); 2043 #endif 2044 if (!eqos->mii) { 2045 eqos->mii = mdio_alloc(); 2046 if (!eqos->mii) { 2047 pr_err("mdio_alloc() failed"); 2048 ret = -ENOMEM; 2049 goto err_remove_resources_tegra; 2050 } 2051 eqos->mii->read = eqos_mdio_read; 2052 eqos->mii->write = eqos_mdio_write; 2053 eqos->mii->priv = eqos; 2054 strcpy(eqos->mii->name, dev->name); 2055 2056 ret = mdio_register(eqos->mii); 2057 if (ret < 0) { 2058 pr_err("mdio_register() failed: %d", ret); 2059 goto err_free_mdio; 2060 } 2061 } 2062 2063 #ifdef CONFIG_DM_ETH_PHY 2064 eth_phy_set_mdio_bus(dev, eqos->mii); 2065 #endif 2066 2067 debug("%s: OK\n", __func__); 2068 return 0; 2069 2070 err_free_mdio: 2071 mdio_free(eqos->mii); 2072 err_remove_resources_tegra: 2073 eqos->config->ops->eqos_remove_resources(dev); 2074 err_remove_resources_core: 2075 eqos_remove_resources_core(dev); 2076 2077 debug("%s: returns %d\n", __func__, ret); 2078 return ret; 2079 } 2080 2081 static int eqos_remove(struct udevice *dev) 2082 { 2083 struct eqos_priv *eqos = dev_get_priv(dev); 2084 2085 debug("%s(dev=%p):\n", __func__, dev); 2086 2087 mdio_unregister(eqos->mii); 2088 mdio_free(eqos->mii); 2089 eqos->config->ops->eqos_remove_resources(dev); 2090 2091 eqos_probe_resources_core(dev); 2092 2093 debug("%s: OK\n", __func__); 2094 return 0; 2095 } 2096 2097 static const struct eth_ops eqos_ops = { 2098 .start = eqos_start, 2099 .stop = eqos_stop, 2100 .send = eqos_send, 2101 .recv = eqos_recv, 2102 .free_pkt = eqos_free_pkt, 2103 .write_hwaddr = eqos_write_hwaddr, 2104 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2105 }; 2106 2107 static struct eqos_ops eqos_tegra186_ops = { 2108 .eqos_inval_desc = eqos_inval_desc_tegra186, 2109 .eqos_flush_desc = eqos_flush_desc_tegra186, 2110 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2111 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2112 .eqos_probe_resources = eqos_probe_resources_tegra186, 2113 .eqos_remove_resources = eqos_remove_resources_tegra186, 2114 .eqos_stop_resets = eqos_stop_resets_tegra186, 2115 .eqos_start_resets = eqos_start_resets_tegra186, 2116 .eqos_stop_clks = eqos_stop_clks_tegra186, 2117 .eqos_start_clks = eqos_start_clks_tegra186, 2118 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2119 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2120 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2121 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2122 .eqos_get_interface = eqos_get_interface_tegra186 2123 }; 2124 2125 static const struct eqos_config eqos_tegra186_config = { 2126 .reg_access_always_ok = false, 2127 .mdio_wait = 10, 2128 .swr_wait = 10, 2129 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2130 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2131 .ops = &eqos_tegra186_ops 2132 }; 2133 2134 static struct eqos_ops eqos_stm32_ops = { 2135 .eqos_inval_desc = eqos_inval_desc_generic, 2136 .eqos_flush_desc = eqos_flush_desc_generic, 2137 .eqos_inval_buffer = eqos_inval_buffer_generic, 2138 .eqos_flush_buffer = eqos_flush_buffer_generic, 2139 .eqos_probe_resources = eqos_probe_resources_stm32, 2140 .eqos_remove_resources = eqos_remove_resources_stm32, 2141 .eqos_stop_resets = eqos_stop_resets_stm32, 2142 .eqos_start_resets = eqos_start_resets_stm32, 2143 .eqos_stop_clks = eqos_stop_clks_stm32, 2144 .eqos_start_clks = eqos_start_clks_stm32, 2145 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2146 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2147 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2148 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2149 .eqos_get_interface = eqos_get_interface_stm32 2150 }; 2151 2152 static const struct eqos_config eqos_stm32_config = { 2153 .reg_access_always_ok = false, 2154 .mdio_wait = 10000, 2155 .swr_wait = 50, 2156 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2157 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2158 .ops = &eqos_stm32_ops 2159 }; 2160 2161 static struct eqos_ops eqos_imx_ops = { 2162 .eqos_inval_desc = eqos_inval_desc_generic, 2163 .eqos_flush_desc = eqos_flush_desc_generic, 2164 .eqos_inval_buffer = eqos_inval_buffer_generic, 2165 .eqos_flush_buffer = eqos_flush_buffer_generic, 2166 .eqos_probe_resources = eqos_probe_resources_imx, 2167 .eqos_remove_resources = eqos_remove_resources_imx, 2168 .eqos_stop_resets = eqos_stop_resets_imx, 2169 .eqos_start_resets = eqos_start_resets_imx, 2170 .eqos_stop_clks = eqos_stop_clks_imx, 2171 .eqos_start_clks = eqos_start_clks_imx, 2172 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2173 .eqos_disable_calibration = eqos_disable_calibration_imx, 2174 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2175 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, 2176 .eqos_get_interface = eqos_get_interface_imx 2177 }; 2178 2179 struct eqos_config eqos_imx_config = { 2180 .reg_access_always_ok = false, 2181 .mdio_wait = 10000, 2182 .swr_wait = 50, 2183 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2184 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2185 .ops = &eqos_imx_ops 2186 }; 2187 2188 static const struct udevice_id eqos_ids[] = { 2189 { 2190 .compatible = "nvidia,tegra186-eqos", 2191 .data = (ulong)&eqos_tegra186_config 2192 }, 2193 { 2194 .compatible = "snps,dwmac-4.20a", 2195 .data = (ulong)&eqos_stm32_config 2196 }, 2197 { 2198 .compatible = "fsl,imx-eqos", 2199 .data = (ulong)&eqos_imx_config 2200 }, 2201 2202 { } 2203 }; 2204 2205 U_BOOT_DRIVER(eth_eqos) = { 2206 .name = "eth_eqos", 2207 .id = UCLASS_ETH, 2208 .of_match = of_match_ptr(eqos_ids), 2209 .probe = eqos_probe, 2210 .remove = eqos_remove, 2211 .ops = &eqos_ops, 2212 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2213 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2214 }; 2215