1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 #ifdef CONFIG_ARCH_IMX8M 45 #include <asm/arch/clock.h> 46 #include <asm/mach-imx/sys_proto.h> 47 #endif 48 49 /* Core registers */ 50 51 #define EQOS_MAC_REGS_BASE 0x000 52 struct eqos_mac_regs { 53 uint32_t configuration; /* 0x000 */ 54 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 55 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 56 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 57 uint32_t rx_flow_ctrl; /* 0x090 */ 58 uint32_t unused_094; /* 0x094 */ 59 uint32_t txq_prty_map0; /* 0x098 */ 60 uint32_t unused_09c; /* 0x09c */ 61 uint32_t rxq_ctrl0; /* 0x0a0 */ 62 uint32_t unused_0a4; /* 0x0a4 */ 63 uint32_t rxq_ctrl2; /* 0x0a8 */ 64 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 65 uint32_t us_tic_counter; /* 0x0dc */ 66 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 67 uint32_t hw_feature0; /* 0x11c */ 68 uint32_t hw_feature1; /* 0x120 */ 69 uint32_t hw_feature2; /* 0x124 */ 70 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 71 uint32_t mdio_address; /* 0x200 */ 72 uint32_t mdio_data; /* 0x204 */ 73 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 74 uint32_t address0_high; /* 0x300 */ 75 uint32_t address0_low; /* 0x304 */ 76 }; 77 78 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 79 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 80 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 81 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 82 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 83 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 84 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 85 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 86 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 87 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 88 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 89 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 90 91 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 92 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 93 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 94 95 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 96 97 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 98 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 99 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 101 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 102 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 103 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 104 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 105 106 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 107 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 108 109 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 110 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 111 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 112 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 113 114 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 115 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 116 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 117 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 118 119 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 120 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 121 122 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 123 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 124 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 125 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 126 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 127 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 128 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 129 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 130 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 131 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 132 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 133 134 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 135 136 #define EQOS_MTL_REGS_BASE 0xd00 137 struct eqos_mtl_regs { 138 uint32_t txq0_operation_mode; /* 0xd00 */ 139 uint32_t unused_d04; /* 0xd04 */ 140 uint32_t txq0_debug; /* 0xd08 */ 141 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 142 uint32_t txq0_quantum_weight; /* 0xd18 */ 143 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 144 uint32_t rxq0_operation_mode; /* 0xd30 */ 145 uint32_t unused_d34; /* 0xd34 */ 146 uint32_t rxq0_debug; /* 0xd38 */ 147 }; 148 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 152 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 153 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 154 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 155 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 156 157 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 158 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 159 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 160 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 167 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 168 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 169 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 170 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 171 172 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 173 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 174 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 175 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 176 177 #define EQOS_DMA_REGS_BASE 0x1000 178 struct eqos_dma_regs { 179 uint32_t mode; /* 0x1000 */ 180 uint32_t sysbus_mode; /* 0x1004 */ 181 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 182 uint32_t ch0_control; /* 0x1100 */ 183 uint32_t ch0_tx_control; /* 0x1104 */ 184 uint32_t ch0_rx_control; /* 0x1108 */ 185 uint32_t unused_110c; /* 0x110c */ 186 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 187 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 188 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 189 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 190 uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */ 191 uint32_t unused_1124; /* 0x1124 */ 192 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 193 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 194 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 195 }; 196 197 #define EQOS_DMA_MODE_SWR BIT(0) 198 199 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 200 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 201 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 202 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 203 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 204 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 205 206 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 207 208 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 209 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 210 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 211 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 212 213 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 214 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 215 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 216 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 217 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 218 219 /* These registers are Tegra186-specific */ 220 #define EQOS_TEGRA186_REGS_BASE 0x8800 221 struct eqos_tegra186_regs { 222 uint32_t sdmemcomppadctrl; /* 0x8800 */ 223 uint32_t auto_cal_config; /* 0x8804 */ 224 uint32_t unused_8808; /* 0x8808 */ 225 uint32_t auto_cal_status; /* 0x880c */ 226 }; 227 228 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 229 230 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 231 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 232 233 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 234 235 /* Descriptors */ 236 237 #define EQOS_DESCRIPTOR_WORDS 4 238 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 239 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 240 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 241 #define EQOS_DESCRIPTORS_TX 4 242 #define EQOS_DESCRIPTORS_RX 4 243 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 244 #define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \ 245 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 246 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 247 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 248 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 249 250 /* 251 * Warn if the cache-line size is larger than the descriptor size. In such 252 * cases the driver will likely fail because the CPU needs to flush the cache 253 * when requeuing RX buffers, therefore descriptors written by the hardware 254 * may be discarded. Architectures with full IO coherence, such as x86, do not 255 * experience this issue, and hence are excluded from this condition. 256 * 257 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 258 * the driver to allocate descriptors from a pool of non-cached memory. 259 */ 260 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 261 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 262 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 263 #warning Cache line size is larger than descriptor size 264 #endif 265 #endif 266 267 struct eqos_desc { 268 u32 des0; 269 u32 des1; 270 u32 des2; 271 u32 des3; 272 }; 273 274 #define EQOS_DESC3_OWN BIT(31) 275 #define EQOS_DESC3_FD BIT(29) 276 #define EQOS_DESC3_LD BIT(28) 277 #define EQOS_DESC3_BUF1V BIT(24) 278 279 struct eqos_config { 280 bool reg_access_always_ok; 281 int mdio_wait; 282 int swr_wait; 283 int config_mac; 284 int config_mac_mdio; 285 struct eqos_ops *ops; 286 }; 287 288 struct eqos_ops { 289 void (*eqos_inval_desc)(void *desc); 290 void (*eqos_flush_desc)(void *desc); 291 void (*eqos_inval_buffer)(void *buf, size_t size); 292 void (*eqos_flush_buffer)(void *buf, size_t size); 293 int (*eqos_probe_resources)(struct udevice *dev); 294 int (*eqos_remove_resources)(struct udevice *dev); 295 int (*eqos_stop_resets)(struct udevice *dev); 296 int (*eqos_start_resets)(struct udevice *dev); 297 void (*eqos_stop_clks)(struct udevice *dev); 298 int (*eqos_start_clks)(struct udevice *dev); 299 int (*eqos_calibrate_pads)(struct udevice *dev); 300 int (*eqos_disable_calibration)(struct udevice *dev); 301 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 302 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 303 phy_interface_t (*eqos_get_interface)(struct udevice *dev); 304 }; 305 306 struct eqos_priv { 307 struct udevice *dev; 308 const struct eqos_config *config; 309 fdt_addr_t regs; 310 struct eqos_mac_regs *mac_regs; 311 struct eqos_mtl_regs *mtl_regs; 312 struct eqos_dma_regs *dma_regs; 313 struct eqos_tegra186_regs *tegra186_regs; 314 struct reset_ctl reset_ctl; 315 struct gpio_desc phy_reset_gpio; 316 u32 reset_delays[3]; 317 struct clk clk_master_bus; 318 struct clk clk_rx; 319 struct clk clk_ptp_ref; 320 struct clk clk_tx; 321 struct clk clk_ck; 322 struct clk clk_slave_bus; 323 struct mii_dev *mii; 324 struct phy_device *phy; 325 int phyaddr; 326 u32 max_speed; 327 void *descs; 328 struct eqos_desc *tx_descs; 329 struct eqos_desc *rx_descs; 330 int tx_desc_idx, rx_desc_idx; 331 void *tx_dma_buf; 332 void *rx_dma_buf; 333 void *rx_pkt; 334 bool started; 335 bool reg_access_ok; 336 }; 337 338 /* 339 * TX and RX descriptors are 16 bytes. This causes problems with the cache 340 * maintenance on CPUs where the cache-line size exceeds the size of these 341 * descriptors. What will happen is that when the driver receives a packet 342 * it will be immediately requeued for the hardware to reuse. The CPU will 343 * therefore need to flush the cache-line containing the descriptor, which 344 * will cause all other descriptors in the same cache-line to be flushed 345 * along with it. If one of those descriptors had been written to by the 346 * device those changes (and the associated packet) will be lost. 347 * 348 * To work around this, we make use of non-cached memory if available. If 349 * descriptors are mapped uncached there's no need to manually flush them 350 * or invalidate them. 351 * 352 * Note that this only applies to descriptors. The packet data buffers do 353 * not have the same constraints since they are 1536 bytes large, so they 354 * are unlikely to share cache-lines. 355 */ 356 static void *eqos_alloc_descs(unsigned int num) 357 { 358 #ifdef CONFIG_SYS_NONCACHED_MEMORY 359 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 360 EQOS_DESCRIPTOR_ALIGN); 361 #else 362 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 363 #endif 364 } 365 366 static void eqos_free_descs(void *descs) 367 { 368 #ifdef CONFIG_SYS_NONCACHED_MEMORY 369 /* FIXME: noncached_alloc() has no opposite */ 370 #else 371 free(descs); 372 #endif 373 } 374 375 static void eqos_inval_desc_tegra186(void *desc) 376 { 377 #ifndef CONFIG_SYS_NONCACHED_MEMORY 378 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 379 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 380 ARCH_DMA_MINALIGN); 381 382 invalidate_dcache_range(start, end); 383 #endif 384 } 385 386 static void eqos_inval_desc_generic(void *desc) 387 { 388 #ifndef CONFIG_SYS_NONCACHED_MEMORY 389 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 390 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 391 ARCH_DMA_MINALIGN); 392 393 invalidate_dcache_range(start, end); 394 #endif 395 } 396 397 static void eqos_flush_desc_tegra186(void *desc) 398 { 399 #ifndef CONFIG_SYS_NONCACHED_MEMORY 400 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 401 #endif 402 } 403 404 static void eqos_flush_desc_generic(void *desc) 405 { 406 #ifndef CONFIG_SYS_NONCACHED_MEMORY 407 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 408 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 409 ARCH_DMA_MINALIGN); 410 411 flush_dcache_range(start, end); 412 #endif 413 } 414 415 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 416 { 417 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 418 unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); 419 420 invalidate_dcache_range(start, end); 421 } 422 423 static void eqos_inval_buffer_generic(void *buf, size_t size) 424 { 425 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 426 unsigned long end = roundup((unsigned long)buf + size, 427 ARCH_DMA_MINALIGN); 428 429 invalidate_dcache_range(start, end); 430 } 431 432 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 433 { 434 flush_cache((unsigned long)buf, size); 435 } 436 437 static void eqos_flush_buffer_generic(void *buf, size_t size) 438 { 439 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 440 unsigned long end = roundup((unsigned long)buf + size, 441 ARCH_DMA_MINALIGN); 442 443 flush_dcache_range(start, end); 444 } 445 446 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 447 { 448 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 449 EQOS_MAC_MDIO_ADDRESS_GB, false, 450 1000000, true); 451 } 452 453 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 454 int mdio_reg) 455 { 456 struct eqos_priv *eqos = bus->priv; 457 u32 val; 458 int ret; 459 460 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 461 mdio_reg); 462 463 ret = eqos_mdio_wait_idle(eqos); 464 if (ret) { 465 pr_err("MDIO not idle at entry"); 466 return ret; 467 } 468 469 val = readl(&eqos->mac_regs->mdio_address); 470 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 471 EQOS_MAC_MDIO_ADDRESS_C45E; 472 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 473 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 474 (eqos->config->config_mac_mdio << 475 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 476 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 477 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 478 EQOS_MAC_MDIO_ADDRESS_GB; 479 writel(val, &eqos->mac_regs->mdio_address); 480 481 udelay(eqos->config->mdio_wait); 482 483 ret = eqos_mdio_wait_idle(eqos); 484 if (ret) { 485 pr_err("MDIO read didn't complete"); 486 return ret; 487 } 488 489 val = readl(&eqos->mac_regs->mdio_data); 490 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 491 492 debug("%s: val=%x\n", __func__, val); 493 494 return val; 495 } 496 497 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 498 int mdio_reg, u16 mdio_val) 499 { 500 struct eqos_priv *eqos = bus->priv; 501 u32 val; 502 int ret; 503 504 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 505 mdio_addr, mdio_reg, mdio_val); 506 507 ret = eqos_mdio_wait_idle(eqos); 508 if (ret) { 509 pr_err("MDIO not idle at entry"); 510 return ret; 511 } 512 513 writel(mdio_val, &eqos->mac_regs->mdio_data); 514 515 val = readl(&eqos->mac_regs->mdio_address); 516 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 517 EQOS_MAC_MDIO_ADDRESS_C45E; 518 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 519 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 520 (eqos->config->config_mac_mdio << 521 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 522 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 523 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 524 EQOS_MAC_MDIO_ADDRESS_GB; 525 writel(val, &eqos->mac_regs->mdio_address); 526 527 udelay(eqos->config->mdio_wait); 528 529 ret = eqos_mdio_wait_idle(eqos); 530 if (ret) { 531 pr_err("MDIO read didn't complete"); 532 return ret; 533 } 534 535 return 0; 536 } 537 538 static int eqos_start_clks_tegra186(struct udevice *dev) 539 { 540 #ifdef CONFIG_CLK 541 struct eqos_priv *eqos = dev_get_priv(dev); 542 int ret; 543 544 debug("%s(dev=%p):\n", __func__, dev); 545 546 ret = clk_enable(&eqos->clk_slave_bus); 547 if (ret < 0) { 548 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 549 goto err; 550 } 551 552 ret = clk_enable(&eqos->clk_master_bus); 553 if (ret < 0) { 554 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 555 goto err_disable_clk_slave_bus; 556 } 557 558 ret = clk_enable(&eqos->clk_rx); 559 if (ret < 0) { 560 pr_err("clk_enable(clk_rx) failed: %d", ret); 561 goto err_disable_clk_master_bus; 562 } 563 564 ret = clk_enable(&eqos->clk_ptp_ref); 565 if (ret < 0) { 566 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 567 goto err_disable_clk_rx; 568 } 569 570 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 571 if (ret < 0) { 572 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 573 goto err_disable_clk_ptp_ref; 574 } 575 576 ret = clk_enable(&eqos->clk_tx); 577 if (ret < 0) { 578 pr_err("clk_enable(clk_tx) failed: %d", ret); 579 goto err_disable_clk_ptp_ref; 580 } 581 #endif 582 583 debug("%s: OK\n", __func__); 584 return 0; 585 586 #ifdef CONFIG_CLK 587 err_disable_clk_ptp_ref: 588 clk_disable(&eqos->clk_ptp_ref); 589 err_disable_clk_rx: 590 clk_disable(&eqos->clk_rx); 591 err_disable_clk_master_bus: 592 clk_disable(&eqos->clk_master_bus); 593 err_disable_clk_slave_bus: 594 clk_disable(&eqos->clk_slave_bus); 595 err: 596 debug("%s: FAILED: %d\n", __func__, ret); 597 return ret; 598 #endif 599 } 600 601 static int eqos_start_clks_stm32(struct udevice *dev) 602 { 603 #ifdef CONFIG_CLK 604 struct eqos_priv *eqos = dev_get_priv(dev); 605 int ret; 606 607 debug("%s(dev=%p):\n", __func__, dev); 608 609 ret = clk_enable(&eqos->clk_master_bus); 610 if (ret < 0) { 611 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 612 goto err; 613 } 614 615 if (clk_valid(&eqos->clk_rx)) { 616 ret = clk_enable(&eqos->clk_rx); 617 if (ret < 0) { 618 pr_err("clk_enable(clk_rx) failed: %d", ret); 619 goto err_disable_clk_master_bus; 620 } 621 } 622 623 if (clk_valid(&eqos->clk_tx)) { 624 ret = clk_enable(&eqos->clk_tx); 625 if (ret < 0) { 626 pr_err("clk_enable(clk_tx) failed: %d", ret); 627 goto err_disable_clk_rx; 628 } 629 } 630 631 if (clk_valid(&eqos->clk_ck)) { 632 ret = clk_enable(&eqos->clk_ck); 633 if (ret < 0) { 634 pr_err("clk_enable(clk_ck) failed: %d", ret); 635 goto err_disable_clk_tx; 636 } 637 } 638 #endif 639 640 debug("%s: OK\n", __func__); 641 return 0; 642 643 #ifdef CONFIG_CLK 644 err_disable_clk_tx: 645 if (clk_valid(&eqos->clk_tx)) 646 clk_disable(&eqos->clk_tx); 647 err_disable_clk_rx: 648 if (clk_valid(&eqos->clk_rx)) 649 clk_disable(&eqos->clk_rx); 650 err_disable_clk_master_bus: 651 clk_disable(&eqos->clk_master_bus); 652 err: 653 debug("%s: FAILED: %d\n", __func__, ret); 654 return ret; 655 #endif 656 } 657 658 static int eqos_start_clks_imx(struct udevice *dev) 659 { 660 return 0; 661 } 662 663 static void eqos_stop_clks_tegra186(struct udevice *dev) 664 { 665 #ifdef CONFIG_CLK 666 struct eqos_priv *eqos = dev_get_priv(dev); 667 668 debug("%s(dev=%p):\n", __func__, dev); 669 670 clk_disable(&eqos->clk_tx); 671 clk_disable(&eqos->clk_ptp_ref); 672 clk_disable(&eqos->clk_rx); 673 clk_disable(&eqos->clk_master_bus); 674 clk_disable(&eqos->clk_slave_bus); 675 #endif 676 677 debug("%s: OK\n", __func__); 678 } 679 680 static void eqos_stop_clks_stm32(struct udevice *dev) 681 { 682 #ifdef CONFIG_CLK 683 struct eqos_priv *eqos = dev_get_priv(dev); 684 685 debug("%s(dev=%p):\n", __func__, dev); 686 687 if (clk_valid(&eqos->clk_tx)) 688 clk_disable(&eqos->clk_tx); 689 if (clk_valid(&eqos->clk_rx)) 690 clk_disable(&eqos->clk_rx); 691 clk_disable(&eqos->clk_master_bus); 692 if (clk_valid(&eqos->clk_ck)) 693 clk_disable(&eqos->clk_ck); 694 #endif 695 696 debug("%s: OK\n", __func__); 697 } 698 699 static void eqos_stop_clks_imx(struct udevice *dev) 700 { 701 /* empty */ 702 } 703 704 static int eqos_start_resets_tegra186(struct udevice *dev) 705 { 706 struct eqos_priv *eqos = dev_get_priv(dev); 707 int ret; 708 709 debug("%s(dev=%p):\n", __func__, dev); 710 711 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 712 if (ret < 0) { 713 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 714 return ret; 715 } 716 717 udelay(2); 718 719 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 720 if (ret < 0) { 721 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 722 return ret; 723 } 724 725 ret = reset_assert(&eqos->reset_ctl); 726 if (ret < 0) { 727 pr_err("reset_assert() failed: %d", ret); 728 return ret; 729 } 730 731 udelay(2); 732 733 ret = reset_deassert(&eqos->reset_ctl); 734 if (ret < 0) { 735 pr_err("reset_deassert() failed: %d", ret); 736 return ret; 737 } 738 739 debug("%s: OK\n", __func__); 740 return 0; 741 } 742 743 static int eqos_start_resets_stm32(struct udevice *dev) 744 { 745 struct eqos_priv *eqos = dev_get_priv(dev); 746 int ret; 747 748 debug("%s(dev=%p):\n", __func__, dev); 749 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 750 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 751 if (ret < 0) { 752 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 753 ret); 754 return ret; 755 } 756 757 udelay(eqos->reset_delays[0]); 758 759 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 760 if (ret < 0) { 761 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 762 ret); 763 return ret; 764 } 765 766 udelay(eqos->reset_delays[1]); 767 768 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 769 if (ret < 0) { 770 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 771 ret); 772 return ret; 773 } 774 775 udelay(eqos->reset_delays[2]); 776 } 777 debug("%s: OK\n", __func__); 778 779 return 0; 780 } 781 782 static int eqos_start_resets_imx(struct udevice *dev) 783 { 784 return 0; 785 } 786 787 static int eqos_stop_resets_tegra186(struct udevice *dev) 788 { 789 struct eqos_priv *eqos = dev_get_priv(dev); 790 791 reset_assert(&eqos->reset_ctl); 792 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 793 794 return 0; 795 } 796 797 static int eqos_stop_resets_stm32(struct udevice *dev) 798 { 799 struct eqos_priv *eqos = dev_get_priv(dev); 800 int ret; 801 802 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 803 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 804 if (ret < 0) { 805 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 806 ret); 807 return ret; 808 } 809 } 810 811 return 0; 812 } 813 814 static int eqos_stop_resets_imx(struct udevice *dev) 815 { 816 return 0; 817 } 818 819 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 820 { 821 struct eqos_priv *eqos = dev_get_priv(dev); 822 int ret; 823 824 debug("%s(dev=%p):\n", __func__, dev); 825 826 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 827 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 828 829 udelay(1); 830 831 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 832 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 833 834 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 835 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 836 if (ret) { 837 pr_err("calibrate didn't start"); 838 goto failed; 839 } 840 841 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 842 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 843 if (ret) { 844 pr_err("calibrate didn't finish"); 845 goto failed; 846 } 847 848 ret = 0; 849 850 failed: 851 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 852 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 853 854 debug("%s: returns %d\n", __func__, ret); 855 856 return ret; 857 } 858 859 static int eqos_disable_calibration_tegra186(struct udevice *dev) 860 { 861 struct eqos_priv *eqos = dev_get_priv(dev); 862 863 debug("%s(dev=%p):\n", __func__, dev); 864 865 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 866 EQOS_AUTO_CAL_CONFIG_ENABLE); 867 868 return 0; 869 } 870 871 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 872 { 873 #ifdef CONFIG_CLK 874 struct eqos_priv *eqos = dev_get_priv(dev); 875 876 return clk_get_rate(&eqos->clk_slave_bus); 877 #else 878 return 0; 879 #endif 880 } 881 882 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 883 { 884 #ifdef CONFIG_CLK 885 struct eqos_priv *eqos = dev_get_priv(dev); 886 887 return clk_get_rate(&eqos->clk_master_bus); 888 #else 889 return 0; 890 #endif 891 } 892 893 __weak u32 imx_get_eqos_csr_clk(void) 894 { 895 return 100 * 1000000; 896 } 897 __weak int imx_eqos_txclk_set_rate(unsigned long rate) 898 { 899 return 0; 900 } 901 902 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 903 { 904 return imx_get_eqos_csr_clk(); 905 } 906 907 static int eqos_calibrate_pads_stm32(struct udevice *dev) 908 { 909 return 0; 910 } 911 912 static int eqos_calibrate_pads_imx(struct udevice *dev) 913 { 914 return 0; 915 } 916 917 static int eqos_disable_calibration_stm32(struct udevice *dev) 918 { 919 return 0; 920 } 921 922 static int eqos_disable_calibration_imx(struct udevice *dev) 923 { 924 return 0; 925 } 926 927 static int eqos_set_full_duplex(struct udevice *dev) 928 { 929 struct eqos_priv *eqos = dev_get_priv(dev); 930 931 debug("%s(dev=%p):\n", __func__, dev); 932 933 setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 934 935 return 0; 936 } 937 938 static int eqos_set_half_duplex(struct udevice *dev) 939 { 940 struct eqos_priv *eqos = dev_get_priv(dev); 941 942 debug("%s(dev=%p):\n", __func__, dev); 943 944 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 945 946 /* WAR: Flush TX queue when switching to half-duplex */ 947 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 948 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 949 950 return 0; 951 } 952 953 static int eqos_set_gmii_speed(struct udevice *dev) 954 { 955 struct eqos_priv *eqos = dev_get_priv(dev); 956 957 debug("%s(dev=%p):\n", __func__, dev); 958 959 clrbits_le32(&eqos->mac_regs->configuration, 960 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 961 962 return 0; 963 } 964 965 static int eqos_set_mii_speed_100(struct udevice *dev) 966 { 967 struct eqos_priv *eqos = dev_get_priv(dev); 968 969 debug("%s(dev=%p):\n", __func__, dev); 970 971 setbits_le32(&eqos->mac_regs->configuration, 972 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 973 974 return 0; 975 } 976 977 static int eqos_set_mii_speed_10(struct udevice *dev) 978 { 979 struct eqos_priv *eqos = dev_get_priv(dev); 980 981 debug("%s(dev=%p):\n", __func__, dev); 982 983 clrsetbits_le32(&eqos->mac_regs->configuration, 984 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 985 986 return 0; 987 } 988 989 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 990 { 991 #ifdef CONFIG_CLK 992 struct eqos_priv *eqos = dev_get_priv(dev); 993 ulong rate; 994 int ret; 995 996 debug("%s(dev=%p):\n", __func__, dev); 997 998 switch (eqos->phy->speed) { 999 case SPEED_1000: 1000 rate = 125 * 1000 * 1000; 1001 break; 1002 case SPEED_100: 1003 rate = 25 * 1000 * 1000; 1004 break; 1005 case SPEED_10: 1006 rate = 2.5 * 1000 * 1000; 1007 break; 1008 default: 1009 pr_err("invalid speed %d", eqos->phy->speed); 1010 return -EINVAL; 1011 } 1012 1013 ret = clk_set_rate(&eqos->clk_tx, rate); 1014 if (ret < 0) { 1015 pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); 1016 return ret; 1017 } 1018 #endif 1019 1020 return 0; 1021 } 1022 1023 static int eqos_set_tx_clk_speed_stm32(struct udevice *dev) 1024 { 1025 return 0; 1026 } 1027 1028 static int eqos_set_tx_clk_speed_imx(struct udevice *dev) 1029 { 1030 struct eqos_priv *eqos = dev_get_priv(dev); 1031 ulong rate; 1032 int ret; 1033 1034 debug("%s(dev=%p):\n", __func__, dev); 1035 1036 switch (eqos->phy->speed) { 1037 case SPEED_1000: 1038 rate = 125 * 1000 * 1000; 1039 break; 1040 case SPEED_100: 1041 rate = 25 * 1000 * 1000; 1042 break; 1043 case SPEED_10: 1044 rate = 2.5 * 1000 * 1000; 1045 break; 1046 default: 1047 pr_err("invalid speed %d", eqos->phy->speed); 1048 return -EINVAL; 1049 } 1050 1051 ret = imx_eqos_txclk_set_rate(rate); 1052 if (ret < 0) { 1053 pr_err("imx (tx_clk, %lu) failed: %d", rate, ret); 1054 return ret; 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int eqos_adjust_link(struct udevice *dev) 1061 { 1062 struct eqos_priv *eqos = dev_get_priv(dev); 1063 int ret; 1064 bool en_calibration; 1065 1066 debug("%s(dev=%p):\n", __func__, dev); 1067 1068 if (eqos->phy->duplex) 1069 ret = eqos_set_full_duplex(dev); 1070 else 1071 ret = eqos_set_half_duplex(dev); 1072 if (ret < 0) { 1073 pr_err("eqos_set_*_duplex() failed: %d", ret); 1074 return ret; 1075 } 1076 1077 switch (eqos->phy->speed) { 1078 case SPEED_1000: 1079 en_calibration = true; 1080 ret = eqos_set_gmii_speed(dev); 1081 break; 1082 case SPEED_100: 1083 en_calibration = true; 1084 ret = eqos_set_mii_speed_100(dev); 1085 break; 1086 case SPEED_10: 1087 en_calibration = false; 1088 ret = eqos_set_mii_speed_10(dev); 1089 break; 1090 default: 1091 pr_err("invalid speed %d", eqos->phy->speed); 1092 return -EINVAL; 1093 } 1094 if (ret < 0) { 1095 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1096 return ret; 1097 } 1098 1099 if (en_calibration) { 1100 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1101 if (ret < 0) { 1102 pr_err("eqos_calibrate_pads() failed: %d", 1103 ret); 1104 return ret; 1105 } 1106 } else { 1107 ret = eqos->config->ops->eqos_disable_calibration(dev); 1108 if (ret < 0) { 1109 pr_err("eqos_disable_calibration() failed: %d", 1110 ret); 1111 return ret; 1112 } 1113 } 1114 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1115 if (ret < 0) { 1116 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1117 return ret; 1118 } 1119 1120 return 0; 1121 } 1122 1123 static int eqos_write_hwaddr(struct udevice *dev) 1124 { 1125 struct eth_pdata *plat = dev_get_platdata(dev); 1126 struct eqos_priv *eqos = dev_get_priv(dev); 1127 uint32_t val; 1128 1129 /* 1130 * This function may be called before start() or after stop(). At that 1131 * time, on at least some configurations of the EQoS HW, all clocks to 1132 * the EQoS HW block will be stopped, and a reset signal applied. If 1133 * any register access is attempted in this state, bus timeouts or CPU 1134 * hangs may occur. This check prevents that. 1135 * 1136 * A simple solution to this problem would be to not implement 1137 * write_hwaddr(), since start() always writes the MAC address into HW 1138 * anyway. However, it is desirable to implement write_hwaddr() to 1139 * support the case of SW that runs subsequent to U-Boot which expects 1140 * the MAC address to already be programmed into the EQoS registers, 1141 * which must happen irrespective of whether the U-Boot user (or 1142 * scripts) actually made use of the EQoS device, and hence 1143 * irrespective of whether start() was ever called. 1144 * 1145 * Note that this requirement by subsequent SW is not valid for 1146 * Tegra186, and is likely not valid for any non-PCI instantiation of 1147 * the EQoS HW block. This function is implemented solely as 1148 * future-proofing with the expectation the driver will eventually be 1149 * ported to some system where the expectation above is true. 1150 */ 1151 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1152 return 0; 1153 1154 /* Update the MAC address */ 1155 val = (plat->enetaddr[5] << 8) | 1156 (plat->enetaddr[4]); 1157 writel(val, &eqos->mac_regs->address0_high); 1158 val = (plat->enetaddr[3] << 24) | 1159 (plat->enetaddr[2] << 16) | 1160 (plat->enetaddr[1] << 8) | 1161 (plat->enetaddr[0]); 1162 writel(val, &eqos->mac_regs->address0_low); 1163 1164 return 0; 1165 } 1166 1167 static int eqos_read_rom_hwaddr(struct udevice *dev) 1168 { 1169 struct eth_pdata *pdata = dev_get_platdata(dev); 1170 1171 #ifdef CONFIG_ARCH_IMX8M 1172 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1173 #endif 1174 return !is_valid_ethaddr(pdata->enetaddr); 1175 } 1176 1177 static int eqos_init(struct udevice *dev) 1178 { 1179 struct eqos_priv *eqos = dev_get_priv(dev); 1180 int ret; 1181 ulong rate; 1182 u32 val; 1183 1184 debug("%s(dev=%p):\n", __func__, dev); 1185 1186 ret = eqos->config->ops->eqos_start_clks(dev); 1187 if (ret < 0) { 1188 pr_err("eqos_start_clks() failed: %d", ret); 1189 goto err; 1190 } 1191 1192 ret = eqos->config->ops->eqos_start_resets(dev); 1193 if (ret < 0) { 1194 pr_err("eqos_start_resets() failed: %d", ret); 1195 goto err_stop_clks; 1196 } 1197 1198 udelay(10); 1199 1200 eqos->reg_access_ok = true; 1201 1202 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1203 EQOS_DMA_MODE_SWR, false, 1204 eqos->config->swr_wait, false); 1205 if (ret) { 1206 pr_err("EQOS_DMA_MODE_SWR stuck"); 1207 goto err_stop_resets; 1208 } 1209 1210 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1211 if (ret < 0) { 1212 pr_err("eqos_calibrate_pads() failed: %d", ret); 1213 goto err_stop_resets; 1214 } 1215 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1216 1217 val = (rate / 1000000) - 1; 1218 writel(val, &eqos->mac_regs->us_tic_counter); 1219 1220 /* 1221 * if PHY was already connected and configured, 1222 * don't need to reconnect/reconfigure again 1223 */ 1224 if (!eqos->phy) { 1225 int addr = -1; 1226 #ifdef CONFIG_DM_ETH_PHY 1227 addr = eth_phy_get_addr(dev); 1228 #endif 1229 #ifdef DWC_NET_PHYADDR 1230 addr = DWC_NET_PHYADDR; 1231 #endif 1232 eqos->phy = phy_connect(eqos->mii, addr, dev, 1233 eqos->config->ops->eqos_get_interface(dev)); 1234 if (!eqos->phy) { 1235 pr_err("phy_connect() failed"); 1236 goto err_stop_resets; 1237 } 1238 1239 if (eqos->max_speed) { 1240 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1241 if (ret) { 1242 pr_err("phy_set_supported() failed: %d", ret); 1243 goto err_shutdown_phy; 1244 } 1245 } 1246 1247 ret = phy_config(eqos->phy); 1248 if (ret < 0) { 1249 pr_err("phy_config() failed: %d", ret); 1250 goto err_shutdown_phy; 1251 } 1252 } 1253 1254 ret = phy_startup(eqos->phy); 1255 if (ret < 0) { 1256 pr_err("phy_startup() failed: %d", ret); 1257 goto err_shutdown_phy; 1258 } 1259 1260 if (!eqos->phy->link) { 1261 pr_err("No link"); 1262 goto err_shutdown_phy; 1263 } 1264 1265 ret = eqos_adjust_link(dev); 1266 if (ret < 0) { 1267 pr_err("eqos_adjust_link() failed: %d", ret); 1268 goto err_shutdown_phy; 1269 } 1270 1271 debug("%s: OK\n", __func__); 1272 return 0; 1273 1274 err_shutdown_phy: 1275 phy_shutdown(eqos->phy); 1276 err_stop_resets: 1277 eqos->config->ops->eqos_stop_resets(dev); 1278 err_stop_clks: 1279 eqos->config->ops->eqos_stop_clks(dev); 1280 err: 1281 pr_err("FAILED: %d", ret); 1282 return ret; 1283 } 1284 1285 static void eqos_enable(struct udevice *dev) 1286 { 1287 struct eqos_priv *eqos = dev_get_priv(dev); 1288 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1289 ulong last_rx_desc; 1290 int i; 1291 1292 eqos->tx_desc_idx = 0; 1293 eqos->rx_desc_idx = 0; 1294 1295 /* Configure MTL */ 1296 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1297 1298 /* Enable Store and Forward mode for TX */ 1299 /* Program Tx operating mode */ 1300 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1301 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1302 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1303 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1304 1305 /* Transmit Queue weight */ 1306 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1307 1308 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1309 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1310 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1311 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1312 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1313 1314 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1315 val = readl(&eqos->mac_regs->hw_feature1); 1316 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1317 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1318 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1319 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1320 1321 /* 1322 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1323 * r/tqs is encoded as (n / 256) - 1. 1324 */ 1325 tqs = (128 << tx_fifo_sz) / 256 - 1; 1326 rqs = (128 << rx_fifo_sz) / 256 - 1; 1327 1328 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1329 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1330 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1331 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1332 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1333 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1334 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1335 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1336 1337 /* Flow control used only if each channel gets 4KB or more FIFO */ 1338 if (rqs >= ((4096 / 256) - 1)) { 1339 u32 rfd, rfa; 1340 1341 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1342 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1343 1344 /* 1345 * Set Threshold for Activating Flow Contol space for min 2 1346 * frames ie, (1500 * 1) = 1500 bytes. 1347 * 1348 * Set Threshold for Deactivating Flow Contol for space of 1349 * min 1 frame (frame size 1500bytes) in receive fifo 1350 */ 1351 if (rqs == ((4096 / 256) - 1)) { 1352 /* 1353 * This violates the above formula because of FIFO size 1354 * limit therefore overflow may occur inspite of this. 1355 */ 1356 rfd = 0x3; /* Full-3K */ 1357 rfa = 0x1; /* Full-1.5K */ 1358 } else if (rqs == ((8192 / 256) - 1)) { 1359 rfd = 0x6; /* Full-4K */ 1360 rfa = 0xa; /* Full-6K */ 1361 } else if (rqs == ((16384 / 256) - 1)) { 1362 rfd = 0x6; /* Full-4K */ 1363 rfa = 0x12; /* Full-10K */ 1364 } else { 1365 rfd = 0x6; /* Full-4K */ 1366 rfa = 0x1E; /* Full-16K */ 1367 } 1368 1369 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1370 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1371 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1372 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1373 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1374 (rfd << 1375 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1376 (rfa << 1377 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1378 } 1379 1380 /* Configure MAC */ 1381 1382 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1383 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1384 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1385 eqos->config->config_mac << 1386 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1387 1388 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1389 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1390 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1391 0x2 << 1392 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1393 1394 /* Multicast and Broadcast Queue Enable */ 1395 setbits_le32(&eqos->mac_regs->unused_0a4, 1396 0x00100000); 1397 /* enable promise mode */ 1398 setbits_le32(&eqos->mac_regs->unused_004[1], 1399 0x1); 1400 1401 /* Set TX flow control parameters */ 1402 /* Set Pause Time */ 1403 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1404 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1405 /* Assign priority for TX flow control */ 1406 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1407 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1408 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1409 /* Assign priority for RX flow control */ 1410 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1411 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1412 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1413 /* Enable flow control */ 1414 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1415 EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1416 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1417 EQOS_MAC_RX_FLOW_CTRL_RFE); 1418 1419 clrsetbits_le32(&eqos->mac_regs->configuration, 1420 EQOS_MAC_CONFIGURATION_GPSLCE | 1421 EQOS_MAC_CONFIGURATION_WD | 1422 EQOS_MAC_CONFIGURATION_JD | 1423 EQOS_MAC_CONFIGURATION_JE, 1424 EQOS_MAC_CONFIGURATION_CST | 1425 EQOS_MAC_CONFIGURATION_ACS); 1426 1427 eqos_write_hwaddr(dev); 1428 1429 /* Configure DMA */ 1430 1431 /* Enable OSP mode */ 1432 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1433 EQOS_DMA_CH0_TX_CONTROL_OSP); 1434 1435 /* RX buffer size. Must be a multiple of bus width */ 1436 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1437 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1438 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1439 EQOS_MAX_PACKET_SIZE << 1440 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1441 1442 setbits_le32(&eqos->dma_regs->ch0_control, 1443 EQOS_DMA_CH0_CONTROL_PBLX8); 1444 1445 /* 1446 * Burst length must be < 1/2 FIFO size. 1447 * FIFO size in tqs is encoded as (n / 256) - 1. 1448 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1449 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 1450 */ 1451 pbl = tqs + 1; 1452 if (pbl > 32) 1453 pbl = 32; 1454 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1455 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1456 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1457 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1458 1459 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1460 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1461 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1462 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1463 1464 /* DMA performance configuration */ 1465 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1466 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1467 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1468 writel(val, &eqos->dma_regs->sysbus_mode); 1469 1470 /* Set up descriptors */ 1471 1472 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1473 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1474 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1475 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1476 (i * EQOS_MAX_PACKET_SIZE)); 1477 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1478 mb(); 1479 eqos->config->ops->eqos_flush_desc(rx_desc); 1480 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1481 (i * EQOS_MAX_PACKET_SIZE), 1482 EQOS_MAX_PACKET_SIZE); 1483 } 1484 1485 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1486 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1487 writel(EQOS_DESCRIPTORS_TX - 1, 1488 &eqos->dma_regs->ch0_txdesc_ring_length); 1489 1490 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1491 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1492 writel(EQOS_DESCRIPTORS_RX - 1, 1493 &eqos->dma_regs->ch0_rxdesc_ring_length); 1494 1495 /* Enable everything */ 1496 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1497 EQOS_DMA_CH0_TX_CONTROL_ST); 1498 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1499 EQOS_DMA_CH0_RX_CONTROL_SR); 1500 setbits_le32(&eqos->mac_regs->configuration, 1501 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1502 1503 /* TX tail pointer not written until we need to TX a packet */ 1504 /* 1505 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1506 * first descriptor, implying all descriptors were available. However, 1507 * that's not distinguishable from none of the descriptors being 1508 * available. 1509 */ 1510 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1511 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1512 1513 eqos->started = true; 1514 } 1515 1516 static int eqos_start(struct udevice *dev) 1517 { 1518 int ret; 1519 1520 ret = eqos_init(dev); 1521 if (ret) 1522 return ret; 1523 1524 eqos_enable(dev); 1525 1526 return 0; 1527 } 1528 1529 static void eqos_stop(struct udevice *dev) 1530 { 1531 struct eqos_priv *eqos = dev_get_priv(dev); 1532 int i; 1533 1534 debug("%s(dev=%p):\n", __func__, dev); 1535 1536 if (!eqos->started) 1537 return; 1538 eqos->started = false; 1539 eqos->reg_access_ok = false; 1540 1541 /* Disable TX DMA */ 1542 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1543 EQOS_DMA_CH0_TX_CONTROL_ST); 1544 1545 /* Wait for TX all packets to drain out of MTL */ 1546 for (i = 0; i < 1000000; i++) { 1547 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1548 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1549 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1550 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1551 if ((trcsts != 1) && (!txqsts)) 1552 break; 1553 } 1554 1555 /* Turn off MAC TX and RX */ 1556 clrbits_le32(&eqos->mac_regs->configuration, 1557 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1558 1559 /* Wait for all RX packets to drain out of MTL */ 1560 for (i = 0; i < 1000000; i++) { 1561 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1562 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1563 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1564 u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1565 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1566 if ((!prxq) && (!rxqsts)) 1567 break; 1568 } 1569 1570 /* Turn off RX DMA */ 1571 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1572 EQOS_DMA_CH0_RX_CONTROL_SR); 1573 1574 if (eqos->phy) { 1575 phy_shutdown(eqos->phy); 1576 } 1577 eqos->config->ops->eqos_stop_resets(dev); 1578 eqos->config->ops->eqos_stop_clks(dev); 1579 1580 debug("%s: OK\n", __func__); 1581 } 1582 1583 static int eqos_send(struct udevice *dev, void *packet, int length) 1584 { 1585 struct eqos_priv *eqos = dev_get_priv(dev); 1586 struct eqos_desc *tx_desc; 1587 int i; 1588 1589 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1590 length); 1591 1592 memcpy(eqos->tx_dma_buf, packet, length); 1593 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1594 1595 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1596 eqos->tx_desc_idx++; 1597 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1598 1599 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1600 tx_desc->des1 = 0; 1601 tx_desc->des2 = length; 1602 /* 1603 * Make sure that if HW sees the _OWN write below, it will see all the 1604 * writes to the rest of the descriptor too. 1605 */ 1606 mb(); 1607 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1608 eqos->config->ops->eqos_flush_desc(tx_desc); 1609 1610 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1611 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1612 1613 for (i = 0; i < 1000000; i++) { 1614 eqos->config->ops->eqos_inval_desc(tx_desc); 1615 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1616 return 0; 1617 udelay(1); 1618 } 1619 1620 debug("%s: TX timeout\n", __func__); 1621 1622 return -ETIMEDOUT; 1623 } 1624 1625 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1626 { 1627 struct eqos_priv *eqos = dev_get_priv(dev); 1628 struct eqos_desc *rx_desc; 1629 int length; 1630 1631 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1632 1633 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1634 eqos->config->ops->eqos_inval_desc(rx_desc); 1635 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1636 debug("%s: RX packet not available\n", __func__); 1637 return -EAGAIN; 1638 } 1639 1640 *packetp = eqos->rx_dma_buf + 1641 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1642 length = rx_desc->des3 & 0x7fff; 1643 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1644 1645 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1646 1647 return length; 1648 } 1649 1650 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1651 { 1652 struct eqos_priv *eqos = dev_get_priv(dev); 1653 uchar *packet_expected; 1654 struct eqos_desc *rx_desc; 1655 1656 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1657 1658 packet_expected = eqos->rx_dma_buf + 1659 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1660 if (packet != packet_expected) { 1661 debug("%s: Unexpected packet (expected %p)\n", __func__, 1662 packet_expected); 1663 return -EINVAL; 1664 } 1665 1666 eqos->config->ops->eqos_inval_buffer(packet, length); 1667 1668 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1669 1670 rx_desc->des0 = 0; 1671 mb(); 1672 eqos->config->ops->eqos_flush_desc(rx_desc); 1673 eqos->config->ops->eqos_inval_buffer(packet, length); 1674 rx_desc->des0 = (u32)(ulong)packet; 1675 rx_desc->des1 = 0; 1676 rx_desc->des2 = 0; 1677 /* 1678 * Make sure that if HW sees the _OWN write below, it will see all the 1679 * writes to the rest of the descriptor too. 1680 */ 1681 mb(); 1682 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1683 eqos->config->ops->eqos_flush_desc(rx_desc); 1684 1685 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1686 1687 eqos->rx_desc_idx++; 1688 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1689 1690 return 0; 1691 } 1692 1693 static int eqos_probe_resources_core(struct udevice *dev) 1694 { 1695 struct eqos_priv *eqos = dev_get_priv(dev); 1696 int ret; 1697 1698 debug("%s(dev=%p):\n", __func__, dev); 1699 1700 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1701 EQOS_DESCRIPTORS_RX); 1702 if (!eqos->descs) { 1703 debug("%s: eqos_alloc_descs() failed\n", __func__); 1704 ret = -ENOMEM; 1705 goto err; 1706 } 1707 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1708 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1709 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1710 eqos->rx_descs); 1711 1712 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1713 if (!eqos->tx_dma_buf) { 1714 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1715 ret = -ENOMEM; 1716 goto err_free_descs; 1717 } 1718 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1719 1720 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1721 if (!eqos->rx_dma_buf) { 1722 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1723 ret = -ENOMEM; 1724 goto err_free_tx_dma_buf; 1725 } 1726 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1727 1728 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1729 if (!eqos->rx_pkt) { 1730 debug("%s: malloc(rx_pkt) failed\n", __func__); 1731 ret = -ENOMEM; 1732 goto err_free_rx_dma_buf; 1733 } 1734 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1735 1736 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1737 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1738 1739 debug("%s: OK\n", __func__); 1740 return 0; 1741 1742 err_free_rx_dma_buf: 1743 free(eqos->rx_dma_buf); 1744 err_free_tx_dma_buf: 1745 free(eqos->tx_dma_buf); 1746 err_free_descs: 1747 eqos_free_descs(eqos->descs); 1748 err: 1749 1750 debug("%s: returns %d\n", __func__, ret); 1751 return ret; 1752 } 1753 1754 static int eqos_remove_resources_core(struct udevice *dev) 1755 { 1756 struct eqos_priv *eqos = dev_get_priv(dev); 1757 1758 debug("%s(dev=%p):\n", __func__, dev); 1759 1760 free(eqos->rx_pkt); 1761 free(eqos->rx_dma_buf); 1762 free(eqos->tx_dma_buf); 1763 eqos_free_descs(eqos->descs); 1764 1765 debug("%s: OK\n", __func__); 1766 return 0; 1767 } 1768 1769 static int eqos_probe_resources_tegra186(struct udevice *dev) 1770 { 1771 struct eqos_priv *eqos = dev_get_priv(dev); 1772 int ret; 1773 1774 debug("%s(dev=%p):\n", __func__, dev); 1775 1776 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1777 if (ret) { 1778 pr_err("reset_get_by_name(rst) failed: %d", ret); 1779 return ret; 1780 } 1781 1782 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1783 &eqos->phy_reset_gpio, 1784 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1785 if (ret) { 1786 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1787 goto err_free_reset_eqos; 1788 } 1789 1790 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1791 if (ret) { 1792 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1793 goto err_free_gpio_phy_reset; 1794 } 1795 1796 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1797 if (ret) { 1798 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1799 goto err_free_clk_slave_bus; 1800 } 1801 1802 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1803 if (ret) { 1804 pr_err("clk_get_by_name(rx) failed: %d", ret); 1805 goto err_free_clk_master_bus; 1806 } 1807 1808 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1809 if (ret) { 1810 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1811 goto err_free_clk_rx; 1812 return ret; 1813 } 1814 1815 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1816 if (ret) { 1817 pr_err("clk_get_by_name(tx) failed: %d", ret); 1818 goto err_free_clk_ptp_ref; 1819 } 1820 1821 debug("%s: OK\n", __func__); 1822 return 0; 1823 1824 err_free_clk_ptp_ref: 1825 clk_free(&eqos->clk_ptp_ref); 1826 err_free_clk_rx: 1827 clk_free(&eqos->clk_rx); 1828 err_free_clk_master_bus: 1829 clk_free(&eqos->clk_master_bus); 1830 err_free_clk_slave_bus: 1831 clk_free(&eqos->clk_slave_bus); 1832 err_free_gpio_phy_reset: 1833 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1834 err_free_reset_eqos: 1835 reset_free(&eqos->reset_ctl); 1836 1837 debug("%s: returns %d\n", __func__, ret); 1838 return ret; 1839 } 1840 1841 /* board-specific Ethernet Interface initializations. */ 1842 __weak int board_interface_eth_init(struct udevice *dev, 1843 phy_interface_t interface_type) 1844 { 1845 return 0; 1846 } 1847 1848 static int eqos_probe_resources_stm32(struct udevice *dev) 1849 { 1850 struct eqos_priv *eqos = dev_get_priv(dev); 1851 int ret; 1852 phy_interface_t interface; 1853 struct ofnode_phandle_args phandle_args; 1854 1855 debug("%s(dev=%p):\n", __func__, dev); 1856 1857 interface = eqos->config->ops->eqos_get_interface(dev); 1858 1859 if (interface == PHY_INTERFACE_MODE_NONE) { 1860 pr_err("Invalid PHY interface\n"); 1861 return -EINVAL; 1862 } 1863 1864 ret = board_interface_eth_init(dev, interface); 1865 if (ret) 1866 return -EINVAL; 1867 1868 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1869 1870 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1871 if (ret) { 1872 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1873 return ret; 1874 } 1875 1876 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1877 if (ret) 1878 pr_warn("clk_get_by_name(rx) failed: %d", ret); 1879 1880 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1881 if (ret) 1882 pr_warn("clk_get_by_name(tx) failed: %d", ret); 1883 1884 /* Get ETH_CLK clocks (optional) */ 1885 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1886 if (ret) 1887 pr_warn("No phy clock provided %d", ret); 1888 1889 eqos->phyaddr = -1; 1890 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1891 &phandle_args); 1892 if (!ret) { 1893 /* search "reset-gpios" in phy node */ 1894 ret = gpio_request_by_name_nodev(phandle_args.node, 1895 "reset-gpios", 0, 1896 &eqos->phy_reset_gpio, 1897 GPIOD_IS_OUT | 1898 GPIOD_IS_OUT_ACTIVE); 1899 if (ret) 1900 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1901 ret); 1902 else 1903 eqos->reset_delays[1] = 2; 1904 1905 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1906 "reg", -1); 1907 } 1908 1909 if (!dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 1910 int reset_flags = GPIOD_IS_OUT; 1911 1912 if (dev_read_bool(dev, "snps,reset-active-low")) 1913 reset_flags |= GPIOD_ACTIVE_LOW; 1914 1915 ret = gpio_request_by_name(dev, "snps,reset-gpio", 0, 1916 &eqos->phy_reset_gpio, reset_flags); 1917 if (ret == 0) 1918 ret = dev_read_u32_array(dev, "snps,reset-delays-us", 1919 eqos->reset_delays, 3); 1920 else 1921 pr_warn("gpio_request_by_name(snps,reset-gpio) failed: %d", 1922 ret); 1923 } 1924 1925 debug("%s: OK\n", __func__); 1926 return 0; 1927 } 1928 1929 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1930 { 1931 const char *phy_mode; 1932 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1933 1934 debug("%s(dev=%p):\n", __func__, dev); 1935 1936 phy_mode = dev_read_string(dev, "phy-mode"); 1937 if (phy_mode) 1938 interface = phy_get_interface_by_name(phy_mode); 1939 1940 return interface; 1941 } 1942 1943 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1944 { 1945 return PHY_INTERFACE_MODE_MII; 1946 } 1947 1948 static int eqos_probe_resources_imx(struct udevice *dev) 1949 { 1950 struct eqos_priv *eqos = dev_get_priv(dev); 1951 phy_interface_t interface; 1952 1953 debug("%s(dev=%p):\n", __func__, dev); 1954 1955 interface = eqos->config->ops->eqos_get_interface(dev); 1956 1957 if (interface == PHY_INTERFACE_MODE_NONE) { 1958 pr_err("Invalid PHY interface\n"); 1959 return -EINVAL; 1960 } 1961 1962 debug("%s: OK\n", __func__); 1963 return 0; 1964 } 1965 1966 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1967 { 1968 const char *phy_mode; 1969 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1970 1971 debug("%s(dev=%p):\n", __func__, dev); 1972 1973 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1974 NULL); 1975 if (phy_mode) 1976 interface = phy_get_interface_by_name(phy_mode); 1977 1978 return interface; 1979 } 1980 1981 static int eqos_remove_resources_tegra186(struct udevice *dev) 1982 { 1983 struct eqos_priv *eqos = dev_get_priv(dev); 1984 1985 debug("%s(dev=%p):\n", __func__, dev); 1986 1987 #ifdef CONFIG_CLK 1988 clk_free(&eqos->clk_tx); 1989 clk_free(&eqos->clk_ptp_ref); 1990 clk_free(&eqos->clk_rx); 1991 clk_free(&eqos->clk_slave_bus); 1992 clk_free(&eqos->clk_master_bus); 1993 #endif 1994 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1995 reset_free(&eqos->reset_ctl); 1996 1997 debug("%s: OK\n", __func__); 1998 return 0; 1999 } 2000 2001 static int eqos_remove_resources_stm32(struct udevice *dev) 2002 { 2003 #ifdef CONFIG_CLK 2004 struct eqos_priv *eqos = dev_get_priv(dev); 2005 2006 debug("%s(dev=%p):\n", __func__, dev); 2007 2008 if (clk_valid(&eqos->clk_tx)) 2009 clk_free(&eqos->clk_tx); 2010 if (clk_valid(&eqos->clk_rx)) 2011 clk_free(&eqos->clk_rx); 2012 clk_free(&eqos->clk_master_bus); 2013 if (clk_valid(&eqos->clk_ck)) 2014 clk_free(&eqos->clk_ck); 2015 #endif 2016 2017 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 2018 dm_gpio_free(dev, &eqos->phy_reset_gpio); 2019 2020 debug("%s: OK\n", __func__); 2021 return 0; 2022 } 2023 2024 static int eqos_remove_resources_imx(struct udevice *dev) 2025 { 2026 return 0; 2027 } 2028 2029 static int eqos_probe(struct udevice *dev) 2030 { 2031 struct eqos_priv *eqos = dev_get_priv(dev); 2032 int ret; 2033 2034 debug("%s(dev=%p):\n", __func__, dev); 2035 2036 eqos->dev = dev; 2037 eqos->config = (void *)dev_get_driver_data(dev); 2038 2039 eqos->regs = dev_read_addr(dev); 2040 if (eqos->regs == FDT_ADDR_T_NONE) { 2041 pr_err("dev_read_addr() failed"); 2042 return -ENODEV; 2043 } 2044 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 2045 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 2046 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 2047 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 2048 2049 ret = eqos_probe_resources_core(dev); 2050 if (ret < 0) { 2051 pr_err("eqos_probe_resources_core() failed: %d", ret); 2052 return ret; 2053 } 2054 2055 ret = eqos->config->ops->eqos_probe_resources(dev); 2056 if (ret < 0) { 2057 pr_err("eqos_probe_resources() failed: %d", ret); 2058 goto err_remove_resources_core; 2059 } 2060 2061 #ifdef CONFIG_DM_ETH_PHY 2062 eqos->mii = eth_phy_get_mdio_bus(dev); 2063 #endif 2064 if (!eqos->mii) { 2065 eqos->mii = mdio_alloc(); 2066 if (!eqos->mii) { 2067 pr_err("mdio_alloc() failed"); 2068 ret = -ENOMEM; 2069 goto err_remove_resources_tegra; 2070 } 2071 eqos->mii->read = eqos_mdio_read; 2072 eqos->mii->write = eqos_mdio_write; 2073 eqos->mii->priv = eqos; 2074 strcpy(eqos->mii->name, dev->name); 2075 2076 ret = mdio_register(eqos->mii); 2077 if (ret < 0) { 2078 pr_err("mdio_register() failed: %d", ret); 2079 goto err_free_mdio; 2080 } 2081 } 2082 2083 #ifdef CONFIG_DM_ETH_PHY 2084 eth_phy_set_mdio_bus(dev, eqos->mii); 2085 #endif 2086 2087 debug("%s: OK\n", __func__); 2088 return 0; 2089 2090 err_free_mdio: 2091 mdio_free(eqos->mii); 2092 err_remove_resources_tegra: 2093 eqos->config->ops->eqos_remove_resources(dev); 2094 err_remove_resources_core: 2095 eqos_remove_resources_core(dev); 2096 2097 debug("%s: returns %d\n", __func__, ret); 2098 return ret; 2099 } 2100 2101 static int eqos_remove(struct udevice *dev) 2102 { 2103 struct eqos_priv *eqos = dev_get_priv(dev); 2104 2105 debug("%s(dev=%p):\n", __func__, dev); 2106 2107 mdio_unregister(eqos->mii); 2108 mdio_free(eqos->mii); 2109 eqos->config->ops->eqos_remove_resources(dev); 2110 2111 eqos_probe_resources_core(dev); 2112 2113 debug("%s: OK\n", __func__); 2114 return 0; 2115 } 2116 2117 static const struct eth_ops eqos_ops = { 2118 .start = eqos_start, 2119 .stop = eqos_stop, 2120 .send = eqos_send, 2121 .recv = eqos_recv, 2122 .free_pkt = eqos_free_pkt, 2123 .write_hwaddr = eqos_write_hwaddr, 2124 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2125 }; 2126 2127 static struct eqos_ops eqos_tegra186_ops = { 2128 .eqos_inval_desc = eqos_inval_desc_tegra186, 2129 .eqos_flush_desc = eqos_flush_desc_tegra186, 2130 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2131 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2132 .eqos_probe_resources = eqos_probe_resources_tegra186, 2133 .eqos_remove_resources = eqos_remove_resources_tegra186, 2134 .eqos_stop_resets = eqos_stop_resets_tegra186, 2135 .eqos_start_resets = eqos_start_resets_tegra186, 2136 .eqos_stop_clks = eqos_stop_clks_tegra186, 2137 .eqos_start_clks = eqos_start_clks_tegra186, 2138 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2139 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2140 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2141 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186, 2142 .eqos_get_interface = eqos_get_interface_tegra186 2143 }; 2144 2145 static const struct eqos_config eqos_tegra186_config = { 2146 .reg_access_always_ok = false, 2147 .mdio_wait = 10, 2148 .swr_wait = 10, 2149 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2150 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2151 .ops = &eqos_tegra186_ops 2152 }; 2153 2154 static struct eqos_ops eqos_stm32_ops = { 2155 .eqos_inval_desc = eqos_inval_desc_generic, 2156 .eqos_flush_desc = eqos_flush_desc_generic, 2157 .eqos_inval_buffer = eqos_inval_buffer_generic, 2158 .eqos_flush_buffer = eqos_flush_buffer_generic, 2159 .eqos_probe_resources = eqos_probe_resources_stm32, 2160 .eqos_remove_resources = eqos_remove_resources_stm32, 2161 .eqos_stop_resets = eqos_stop_resets_stm32, 2162 .eqos_start_resets = eqos_start_resets_stm32, 2163 .eqos_stop_clks = eqos_stop_clks_stm32, 2164 .eqos_start_clks = eqos_start_clks_stm32, 2165 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2166 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2167 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2168 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32, 2169 .eqos_get_interface = eqos_get_interface_stm32 2170 }; 2171 2172 static const struct eqos_config eqos_stm32_config = { 2173 .reg_access_always_ok = false, 2174 .mdio_wait = 10000, 2175 .swr_wait = 50, 2176 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2177 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2178 .ops = &eqos_stm32_ops 2179 }; 2180 2181 static struct eqos_ops eqos_imx_ops = { 2182 .eqos_inval_desc = eqos_inval_desc_generic, 2183 .eqos_flush_desc = eqos_flush_desc_generic, 2184 .eqos_inval_buffer = eqos_inval_buffer_generic, 2185 .eqos_flush_buffer = eqos_flush_buffer_generic, 2186 .eqos_probe_resources = eqos_probe_resources_imx, 2187 .eqos_remove_resources = eqos_remove_resources_imx, 2188 .eqos_stop_resets = eqos_stop_resets_imx, 2189 .eqos_start_resets = eqos_start_resets_imx, 2190 .eqos_stop_clks = eqos_stop_clks_imx, 2191 .eqos_start_clks = eqos_start_clks_imx, 2192 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2193 .eqos_disable_calibration = eqos_disable_calibration_imx, 2194 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2195 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, 2196 .eqos_get_interface = eqos_get_interface_imx 2197 }; 2198 2199 struct eqos_config eqos_imx_config = { 2200 .reg_access_always_ok = false, 2201 .mdio_wait = 10000, 2202 .swr_wait = 50, 2203 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2204 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2205 .ops = &eqos_imx_ops 2206 }; 2207 2208 static const struct udevice_id eqos_ids[] = { 2209 { 2210 .compatible = "nvidia,tegra186-eqos", 2211 .data = (ulong)&eqos_tegra186_config 2212 }, 2213 { 2214 .compatible = "snps,dwmac-4.20a", 2215 .data = (ulong)&eqos_stm32_config 2216 }, 2217 { 2218 .compatible = "fsl,imx-eqos", 2219 .data = (ulong)&eqos_imx_config 2220 }, 2221 2222 { } 2223 }; 2224 2225 U_BOOT_DRIVER(eth_eqos) = { 2226 .name = "eth_eqos", 2227 .id = UCLASS_ETH, 2228 .of_match = of_match_ptr(eqos_ids), 2229 .probe = eqos_probe, 2230 .remove = eqos_remove, 2231 .ops = &eqos_ops, 2232 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2233 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2234 }; 2235