1 /* 2 * Copyright (c) 2016, NVIDIA CORPORATION. 3 * 4 * SPDX-License-Identifier: GPL-2.0 5 * 6 * Portions based on U-Boot's rtl8169.c. 7 */ 8 9 /* 10 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of 11 * Service) IP block. The IP supports multiple options for bus type, clocking/ 12 * reset structure, and feature list. 13 * 14 * The driver is written such that generic core logic is kept separate from 15 * configuration-specific logic. Code that interacts with configuration- 16 * specific resources is split out into separate functions to avoid polluting 17 * common code. If/when this driver is enhanced to support multiple 18 * configurations, the core code should be adapted to call all configuration- 19 * specific functions through function pointers, with the definition of those 20 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data 21 * field. 22 * 23 * The following configurations are currently supported: 24 * tegra186: 25 * NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an 26 * AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and 27 * supports a single RGMII PHY. This configuration also has SW control over 28 * all clock and reset signals to the HW block. 
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif

/* Core registers */

/*
 * Partial register maps of the EQOS MAC/MTL/DMA sub-blocks. Only the
 * registers this driver touches are named; the unused_* members pad the
 * structs so each named field lands at the offset given in its trailing
 * comment (relative to the controller base).
 */
#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

/* MAC_CONFIGURATION register bit-fields */
#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

/* RXQ0EN: 2-bit enable field for RX queue 0 (DCB/generic vs AV mode) */
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

/* FIFO sizes in HW_FEATURE1 are encoded as log2(bytes / 128) */
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

/*
 * MDIO_ADDRESS fields: PA = PHY address, RDA = register address,
 * CR = CSR clock range divider, GOC = operation (read/write),
 * GB = busy flag (set to start, polled to detect completion).
 */
#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

/* One HW DMA descriptor (4 little-endian 32-bit words) */
struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

/*
 * Per-SoC configuration selected via the udevice_id .data field:
 * timing parameters, default MAC/MDIO settings, the PHY interface
 * lookup callback and the configuration-specific ops table.
 */
struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

/*
 * Configuration-specific operations: cache maintenance for descriptors
 * and packet buffers, plus clock/reset/pad-calibration hooks.
 */
struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

/* Per-device driver state */
struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available.
 * If descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */

/*
 * Allocate the descriptor pool: non-cached memory when available (see the
 * comment above), otherwise a cache-aligned heap allocation.
 * Note: @num is unused; the pool is always EQOS_DESCRIPTORS_SIZE bytes.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

/* Free a pool obtained from eqos_alloc_descs() */
static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

/*
 * Invalidate one descriptor's cache footprint (Tegra186 flavour).
 * No-op when descriptors live in non-cached memory.
 */
static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	/* Round the descriptor out to whole cache lines */
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

/* Invalidate one descriptor's cache footprint (generic flavour) */
static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

/* Write back one descriptor to memory so the DMA sees it (Tegra186) */
static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

/* Write back one descriptor to memory so the DMA sees it (generic) */
static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

/* Invalidate a packet buffer before the CPU reads DMA-written data */
static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

/* Invalidate a packet buffer before the CPU reads DMA-written data */
static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

/* Write back a packet buffer before handing it to the DMA */
static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

/* Write back a packet buffer before handing it to the DMA */
static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

/*
 * Poll until the MDIO "GMII Busy" bit clears (up to 1s).
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

/*
 * MII bus read callback: issue a clause-22 read via the MAC's MDIO
 * master. Returns the 16-bit register value, or a negative error code.
 * (mdio_devad is unused; clause-45 access is not implemented here.)
 */
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	/* Preserve only SKAP/C45E; rebuild the rest of the command word */
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	/* Per-SoC settle time before polling for completion */
	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

/*
 * MII bus write callback: issue a clause-22 write via the MAC's MDIO
 * master. Returns 0 on success or a negative error code.
 */
static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		/*
		 * NOTE(review): message says "read" but this is the write
		 * path — likely copy-paste from eqos_mdio_read()
		 */
		pr_err("MDIO read didn't complete");
		return ret;
	}

	return 0;
}

/*
 * Enable all Tegra186 clocks (slave bus, master bus, RX, PTP ref, TX)
 * and set the PTP reference clock to 125MHz. On failure, clocks enabled
 * so far are disabled again in reverse order.
 */
static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

/*
 * Enable the STM32 clocks (master bus, RX, TX, and the optional "eth-ck"
 * kernel clock when present). Unwinds on failure.
 */
static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	/* clk_ck is optional in the DT */
	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

/* i.MX: clocks are managed elsewhere; nothing to do */
static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

/* Disable all Tegra186 clocks enabled by eqos_start_clks_tegra186() */
static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

/* Disable all STM32 clocks enabled by eqos_start_clks_stm32() */
static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

/* i.MX: nothing to do */
static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

/*
 * Pulse the PHY reset GPIO, then pulse the controller reset, each with a
 * 2us assertion. Returns 0 on success or a negative error code.
 */
static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

/* Pulse the (optional) PHY reset GPIO with a 2us assertion */
static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

/* i.MX: nothing to do */
static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

/*
 * Hold the controller and PHY in reset.
 * NOTE(review): return values of reset_assert()/dm_gpio_set_value() are
 * ignored here (best-effort teardown).
 */
static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

/* Hold the PHY in reset (when a reset GPIO exists) */
static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

/* i.MX: nothing to do */
static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

/*
 * Run the Tegra186 pad auto-calibration sequence: power the pads, kick
 * off calibration, wait for it to start then finish, and power the pads
 * back down regardless of outcome.
 */
static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	/* Always power the pads down again, even on failure */
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

/* Turn off Tegra186 automatic pad calibration */
static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

/* CSR/tick clock rate used to program timing values (Tegra186) */
static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

/* CSR/tick clock rate used to program timing values (STM32) */
static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

/* Weak default: board/SoC code may override the i.MX CSR clock rate */
__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

/* Weak default: board/SoC code may override TX clock rate programming */
__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

/* CSR/tick clock rate used to program timing values (i.MX) */
static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

/* STM32: no pad calibration needed */
static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

/* i.MX: no pad calibration needed */
static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

/* STM32: nothing to disable */
static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

/* i.MX: nothing to disable */
static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

/* Put the MAC into full-duplex mode */
static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

/* Put the MAC into half-duplex mode */
static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

/* Select 1000Mbps MAC operation (clear PS and FES) */
static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

/* Select 100Mbps MAC operation (set PS and FES) */
static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

/* Select 10Mbps MAC operation (set PS, clear FES) */
static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

/*
 * Set the TX clock to match the negotiated PHY speed:
 * 125MHz / 25MHz / 2.5MHz for 1000/100/10 Mbps (Tegra186).
 */
static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		/* floating-point constant; evaluates to exactly 2500000 */
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

/* STM32: TX clock follows the PHY automatically; nothing to do */
static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

/*
 * Set the TX clock to match the negotiated PHY speed via the (weak)
 * board-provided imx_eqos_txclk_set_rate() hook.
 */
static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		/* floating-point constant; evaluates to exactly 2500000 */
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

/*
 * Re-program MAC duplex/speed settings after PHY auto-negotiation.
 * (Continues past this chunk.)
 */
static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed
%d", eqos->phy->speed); 1072 return -EINVAL; 1073 } 1074 if (ret < 0) { 1075 pr_err("eqos_set_*mii_speed*() failed: %d", ret); 1076 return ret; 1077 } 1078 1079 if (en_calibration) { 1080 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1081 if (ret < 0) { 1082 pr_err("eqos_calibrate_pads() failed: %d", 1083 ret); 1084 return ret; 1085 } 1086 } else { 1087 ret = eqos->config->ops->eqos_disable_calibration(dev); 1088 if (ret < 0) { 1089 pr_err("eqos_disable_calibration() failed: %d", 1090 ret); 1091 return ret; 1092 } 1093 } 1094 ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); 1095 if (ret < 0) { 1096 pr_err("eqos_set_tx_clk_speed() failed: %d", ret); 1097 return ret; 1098 } 1099 1100 return 0; 1101 } 1102 1103 static int eqos_write_hwaddr(struct udevice *dev) 1104 { 1105 struct eth_pdata *plat = dev_get_platdata(dev); 1106 struct eqos_priv *eqos = dev_get_priv(dev); 1107 uint32_t val; 1108 1109 /* 1110 * This function may be called before start() or after stop(). At that 1111 * time, on at least some configurations of the EQoS HW, all clocks to 1112 * the EQoS HW block will be stopped, and a reset signal applied. If 1113 * any register access is attempted in this state, bus timeouts or CPU 1114 * hangs may occur. This check prevents that. 1115 * 1116 * A simple solution to this problem would be to not implement 1117 * write_hwaddr(), since start() always writes the MAC address into HW 1118 * anyway. However, it is desirable to implement write_hwaddr() to 1119 * support the case of SW that runs subsequent to U-Boot which expects 1120 * the MAC address to already be programmed into the EQoS registers, 1121 * which must happen irrespective of whether the U-Boot user (or 1122 * scripts) actually made use of the EQoS device, and hence 1123 * irrespective of whether start() was ever called. 1124 * 1125 * Note that this requirement by subsequent SW is not valid for 1126 * Tegra186, and is likely not valid for any non-PCI instantiation of 1127 * the EQoS HW block. 
This function is implemented solely as 1128 * future-proofing with the expectation the driver will eventually be 1129 * ported to some system where the expectation above is true. 1130 */ 1131 if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok) 1132 return 0; 1133 1134 /* Update the MAC address */ 1135 val = (plat->enetaddr[5] << 8) | 1136 (plat->enetaddr[4]); 1137 writel(val, &eqos->mac_regs->address0_high); 1138 val = (plat->enetaddr[3] << 24) | 1139 (plat->enetaddr[2] << 16) | 1140 (plat->enetaddr[1] << 8) | 1141 (plat->enetaddr[0]); 1142 writel(val, &eqos->mac_regs->address0_low); 1143 1144 return 0; 1145 } 1146 1147 static int eqos_read_rom_hwaddr(struct udevice *dev) 1148 { 1149 struct eth_pdata *pdata = dev_get_platdata(dev); 1150 1151 #ifdef CONFIG_ARCH_IMX8M 1152 imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr); 1153 #endif 1154 return !is_valid_ethaddr(pdata->enetaddr); 1155 } 1156 1157 static int eqos_start(struct udevice *dev) 1158 { 1159 struct eqos_priv *eqos = dev_get_priv(dev); 1160 int ret, i; 1161 ulong rate; 1162 u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl; 1163 ulong last_rx_desc; 1164 1165 debug("%s(dev=%p):\n", __func__, dev); 1166 1167 eqos->tx_desc_idx = 0; 1168 eqos->rx_desc_idx = 0; 1169 1170 ret = eqos->config->ops->eqos_start_clks(dev); 1171 if (ret < 0) { 1172 pr_err("eqos_start_clks() failed: %d", ret); 1173 goto err; 1174 } 1175 1176 ret = eqos->config->ops->eqos_start_resets(dev); 1177 if (ret < 0) { 1178 pr_err("eqos_start_resets() failed: %d", ret); 1179 goto err_stop_clks; 1180 } 1181 1182 udelay(10); 1183 1184 eqos->reg_access_ok = true; 1185 1186 ret = wait_for_bit_le32(&eqos->dma_regs->mode, 1187 EQOS_DMA_MODE_SWR, false, 1188 eqos->config->swr_wait, false); 1189 if (ret) { 1190 pr_err("EQOS_DMA_MODE_SWR stuck"); 1191 goto err_stop_resets; 1192 } 1193 1194 ret = eqos->config->ops->eqos_calibrate_pads(dev); 1195 if (ret < 0) { 1196 pr_err("eqos_calibrate_pads() failed: %d", ret); 1197 goto err_stop_resets; 
1198 } 1199 rate = eqos->config->ops->eqos_get_tick_clk_rate(dev); 1200 1201 val = (rate / 1000000) - 1; 1202 writel(val, &eqos->mac_regs->us_tic_counter); 1203 1204 /* 1205 * if PHY was already connected and configured, 1206 * don't need to reconnect/reconfigure again 1207 */ 1208 if (!eqos->phy) { 1209 int addr = -1; 1210 #ifdef CONFIG_DM_ETH_PHY 1211 addr = eth_phy_get_addr(dev); 1212 #endif 1213 #ifdef DWC_NET_PHYADDR 1214 addr = DWC_NET_PHYADDR; 1215 #endif 1216 eqos->phy = phy_connect(eqos->mii, addr, dev, 1217 eqos->config->interface(dev)); 1218 if (!eqos->phy) { 1219 pr_err("phy_connect() failed"); 1220 goto err_stop_resets; 1221 } 1222 1223 if (eqos->max_speed) { 1224 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1225 if (ret) { 1226 pr_err("phy_set_supported() failed: %d", ret); 1227 goto err_shutdown_phy; 1228 } 1229 } 1230 1231 ret = phy_config(eqos->phy); 1232 if (ret < 0) { 1233 pr_err("phy_config() failed: %d", ret); 1234 goto err_shutdown_phy; 1235 } 1236 } 1237 1238 ret = phy_startup(eqos->phy); 1239 if (ret < 0) { 1240 pr_err("phy_startup() failed: %d", ret); 1241 goto err_shutdown_phy; 1242 } 1243 1244 if (!eqos->phy->link) { 1245 pr_err("No link"); 1246 goto err_shutdown_phy; 1247 } 1248 1249 ret = eqos_adjust_link(dev); 1250 if (ret < 0) { 1251 pr_err("eqos_adjust_link() failed: %d", ret); 1252 goto err_shutdown_phy; 1253 } 1254 1255 /* Configure MTL */ 1256 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1257 1258 /* Enable Store and Forward mode for TX */ 1259 /* Program Tx operating mode */ 1260 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1261 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1262 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1263 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1264 1265 /* Transmit Queue weight */ 1266 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1267 1268 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1269 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1270 
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Contol space for min 2
		 * frames ie, (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Contol for space of
		 * min 1 frame (frame size 1500bytes) in receive fifo
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur inspite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/*
	 * NOTE(review): this second write unconditionally forces the RXQ0EN
	 * field to 0x2, immediately overriding the per-config value written
	 * just above (e.g. stm32's AV setting) — confirm this is intended.
	 */
	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* enable promise mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	/* Strip CRC/pad on RX; disable watchdog/jabber/jumbo handling */
	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);
		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		/* Descriptor fully written before handing ownership to HW */
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						(i * EQOS_MAX_PACKET_SIZE),
						EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE |
		     EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

/*
 * Quiesce the interface: stop TX DMA, drain the MTL queues, disable the
 * MAC, stop RX DMA, then shut down the PHY, resets and clocks.
 * Safe to call when not started (returns immediately).
 */
static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for TX all packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy) {
		phy_shutdown(eqos->phy);
	}
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

/*
 * Transmit one packet: copy into the bounce buffer, fill the next TX
 * descriptor, hand it to HW, and busy-wait (up to ~1s) for completion.
 */
static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	/* Advancing the tail pointer kicks the DMA engine */
	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

/*
 * Check the current RX descriptor; return the packet length and a
 * pointer into the RX DMA buffer, or -EAGAIN if HW still owns it.
 */
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	/* des3 low bits hold the received frame length */
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

/*
 * Return a consumed RX buffer to the hardware by re-arming its
 * descriptor and advancing the RX tail pointer.
 */
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	/* Packets must be freed in ring order */
	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	/*
	 * Descriptor was first published with des0 == 0, then rewritten with
	 * the real buffer address before the OWN handoff below.
	 */
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

/*
 * Allocate the configuration-independent DMA resources: descriptor
 * rings plus TX/RX bounce buffers. On failure, frees whatever was
 * already allocated and returns -ENOMEM.
 */
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	/* TX descriptors first, RX descriptors follow in the same block */
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* Release everything allocated by eqos_probe_resources_core(). */
static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/*
 * Acquire the Tegra186-specific reset, PHY-reset GPIO and clock
 * handles from the device tree, releasing earlier acquisitions on any
 * failure.
 */
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto
err_free_clk_master_bus; 1766 } 1767 1768 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1769 if (ret) { 1770 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1771 goto err_free_clk_rx; 1772 return ret; 1773 } 1774 1775 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1776 if (ret) { 1777 pr_err("clk_get_by_name(tx) failed: %d", ret); 1778 goto err_free_clk_ptp_ref; 1779 } 1780 1781 debug("%s: OK\n", __func__); 1782 return 0; 1783 1784 err_free_clk_ptp_ref: 1785 clk_free(&eqos->clk_ptp_ref); 1786 err_free_clk_rx: 1787 clk_free(&eqos->clk_rx); 1788 err_free_clk_master_bus: 1789 clk_free(&eqos->clk_master_bus); 1790 err_free_clk_slave_bus: 1791 clk_free(&eqos->clk_slave_bus); 1792 err_free_gpio_phy_reset: 1793 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1794 err_free_reset_eqos: 1795 reset_free(&eqos->reset_ctl); 1796 1797 debug("%s: returns %d\n", __func__, ret); 1798 return ret; 1799 } 1800 1801 /* board-specific Ethernet Interface initializations. */ 1802 __weak int board_interface_eth_init(struct udevice *dev, 1803 phy_interface_t interface_type) 1804 { 1805 return 0; 1806 } 1807 1808 static int eqos_probe_resources_stm32(struct udevice *dev) 1809 { 1810 struct eqos_priv *eqos = dev_get_priv(dev); 1811 int ret; 1812 phy_interface_t interface; 1813 struct ofnode_phandle_args phandle_args; 1814 1815 debug("%s(dev=%p):\n", __func__, dev); 1816 1817 interface = eqos->config->interface(dev); 1818 1819 if (interface == PHY_INTERFACE_MODE_NONE) { 1820 pr_err("Invalid PHY interface\n"); 1821 return -EINVAL; 1822 } 1823 1824 ret = board_interface_eth_init(dev, interface); 1825 if (ret) 1826 return -EINVAL; 1827 1828 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1829 1830 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1831 if (ret) { 1832 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1833 goto err_probe; 1834 } 1835 1836 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1837 if (ret) { 1838 
pr_err("clk_get_by_name(rx) failed: %d", ret); 1839 goto err_free_clk_master_bus; 1840 } 1841 1842 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1843 if (ret) { 1844 pr_err("clk_get_by_name(tx) failed: %d", ret); 1845 goto err_free_clk_rx; 1846 } 1847 1848 /* Get ETH_CLK clocks (optional) */ 1849 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1850 if (ret) 1851 pr_warn("No phy clock provided %d", ret); 1852 1853 eqos->phyaddr = -1; 1854 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1855 &phandle_args); 1856 if (!ret) { 1857 /* search "reset-gpios" in phy node */ 1858 ret = gpio_request_by_name_nodev(phandle_args.node, 1859 "reset-gpios", 0, 1860 &eqos->phy_reset_gpio, 1861 GPIOD_IS_OUT | 1862 GPIOD_IS_OUT_ACTIVE); 1863 if (ret) 1864 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1865 ret); 1866 1867 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1868 "reg", -1); 1869 } 1870 1871 debug("%s: OK\n", __func__); 1872 return 0; 1873 1874 err_free_clk_rx: 1875 clk_free(&eqos->clk_rx); 1876 err_free_clk_master_bus: 1877 clk_free(&eqos->clk_master_bus); 1878 err_probe: 1879 1880 debug("%s: returns %d\n", __func__, ret); 1881 return ret; 1882 } 1883 1884 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1885 { 1886 const char *phy_mode; 1887 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1888 1889 debug("%s(dev=%p):\n", __func__, dev); 1890 1891 phy_mode = dev_read_string(dev, "phy-mode"); 1892 if (phy_mode) 1893 interface = phy_get_interface_by_name(phy_mode); 1894 1895 return interface; 1896 } 1897 1898 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1899 { 1900 return PHY_INTERFACE_MODE_MII; 1901 } 1902 1903 static int eqos_probe_resources_imx(struct udevice *dev) 1904 { 1905 struct eqos_priv *eqos = dev_get_priv(dev); 1906 phy_interface_t interface; 1907 1908 debug("%s(dev=%p):\n", __func__, dev); 1909 1910 interface = eqos->config->interface(dev); 1911 1912 if 
(interface == PHY_INTERFACE_MODE_NONE) { 1913 pr_err("Invalid PHY interface\n"); 1914 return -EINVAL; 1915 } 1916 1917 debug("%s: OK\n", __func__); 1918 return 0; 1919 } 1920 1921 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1922 { 1923 const char *phy_mode; 1924 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1925 1926 debug("%s(dev=%p):\n", __func__, dev); 1927 1928 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1929 NULL); 1930 if (phy_mode) 1931 interface = phy_get_interface_by_name(phy_mode); 1932 1933 return interface; 1934 } 1935 1936 static int eqos_remove_resources_tegra186(struct udevice *dev) 1937 { 1938 struct eqos_priv *eqos = dev_get_priv(dev); 1939 1940 debug("%s(dev=%p):\n", __func__, dev); 1941 1942 #ifdef CONFIG_CLK 1943 clk_free(&eqos->clk_tx); 1944 clk_free(&eqos->clk_ptp_ref); 1945 clk_free(&eqos->clk_rx); 1946 clk_free(&eqos->clk_slave_bus); 1947 clk_free(&eqos->clk_master_bus); 1948 #endif 1949 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1950 reset_free(&eqos->reset_ctl); 1951 1952 debug("%s: OK\n", __func__); 1953 return 0; 1954 } 1955 1956 static int eqos_remove_resources_stm32(struct udevice *dev) 1957 { 1958 #ifdef CONFIG_CLK 1959 struct eqos_priv *eqos = dev_get_priv(dev); 1960 1961 debug("%s(dev=%p):\n", __func__, dev); 1962 1963 clk_free(&eqos->clk_tx); 1964 clk_free(&eqos->clk_rx); 1965 clk_free(&eqos->clk_master_bus); 1966 if (clk_valid(&eqos->clk_ck)) 1967 clk_free(&eqos->clk_ck); 1968 #endif 1969 1970 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 1971 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1972 1973 debug("%s: OK\n", __func__); 1974 return 0; 1975 } 1976 1977 static int eqos_remove_resources_imx(struct udevice *dev) 1978 { 1979 return 0; 1980 } 1981 1982 static int eqos_probe(struct udevice *dev) 1983 { 1984 struct eqos_priv *eqos = dev_get_priv(dev); 1985 int ret; 1986 1987 debug("%s(dev=%p):\n", __func__, dev); 1988 1989 eqos->dev = dev; 1990 eqos->config = (void 
*)dev_get_driver_data(dev); 1991 1992 eqos->regs = dev_read_addr(dev); 1993 if (eqos->regs == FDT_ADDR_T_NONE) { 1994 pr_err("dev_read_addr() failed"); 1995 return -ENODEV; 1996 } 1997 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 1998 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 1999 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 2000 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 2001 2002 ret = eqos_probe_resources_core(dev); 2003 if (ret < 0) { 2004 pr_err("eqos_probe_resources_core() failed: %d", ret); 2005 return ret; 2006 } 2007 2008 ret = eqos->config->ops->eqos_probe_resources(dev); 2009 if (ret < 0) { 2010 pr_err("eqos_probe_resources() failed: %d", ret); 2011 goto err_remove_resources_core; 2012 } 2013 2014 #ifdef CONFIG_DM_ETH_PHY 2015 eqos->mii = eth_phy_get_mdio_bus(dev); 2016 #endif 2017 if (!eqos->mii) { 2018 eqos->mii = mdio_alloc(); 2019 if (!eqos->mii) { 2020 pr_err("mdio_alloc() failed"); 2021 ret = -ENOMEM; 2022 goto err_remove_resources_tegra; 2023 } 2024 eqos->mii->read = eqos_mdio_read; 2025 eqos->mii->write = eqos_mdio_write; 2026 eqos->mii->priv = eqos; 2027 strcpy(eqos->mii->name, dev->name); 2028 2029 ret = mdio_register(eqos->mii); 2030 if (ret < 0) { 2031 pr_err("mdio_register() failed: %d", ret); 2032 goto err_free_mdio; 2033 } 2034 } 2035 2036 #ifdef CONFIG_DM_ETH_PHY 2037 eth_phy_set_mdio_bus(dev, eqos->mii); 2038 #endif 2039 2040 debug("%s: OK\n", __func__); 2041 return 0; 2042 2043 err_free_mdio: 2044 mdio_free(eqos->mii); 2045 err_remove_resources_tegra: 2046 eqos->config->ops->eqos_remove_resources(dev); 2047 err_remove_resources_core: 2048 eqos_remove_resources_core(dev); 2049 2050 debug("%s: returns %d\n", __func__, ret); 2051 return ret; 2052 } 2053 2054 static int eqos_remove(struct udevice *dev) 2055 { 2056 struct eqos_priv *eqos = dev_get_priv(dev); 2057 2058 debug("%s(dev=%p):\n", __func__, dev); 2059 2060 mdio_unregister(eqos->mii); 2061 
mdio_free(eqos->mii); 2062 eqos->config->ops->eqos_remove_resources(dev); 2063 2064 eqos_probe_resources_core(dev); 2065 2066 debug("%s: OK\n", __func__); 2067 return 0; 2068 } 2069 2070 static const struct eth_ops eqos_ops = { 2071 .start = eqos_start, 2072 .stop = eqos_stop, 2073 .send = eqos_send, 2074 .recv = eqos_recv, 2075 .free_pkt = eqos_free_pkt, 2076 .write_hwaddr = eqos_write_hwaddr, 2077 .read_rom_hwaddr = eqos_read_rom_hwaddr, 2078 }; 2079 2080 static struct eqos_ops eqos_tegra186_ops = { 2081 .eqos_inval_desc = eqos_inval_desc_tegra186, 2082 .eqos_flush_desc = eqos_flush_desc_tegra186, 2083 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2084 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2085 .eqos_probe_resources = eqos_probe_resources_tegra186, 2086 .eqos_remove_resources = eqos_remove_resources_tegra186, 2087 .eqos_stop_resets = eqos_stop_resets_tegra186, 2088 .eqos_start_resets = eqos_start_resets_tegra186, 2089 .eqos_stop_clks = eqos_stop_clks_tegra186, 2090 .eqos_start_clks = eqos_start_clks_tegra186, 2091 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2092 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2093 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2094 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186 2095 }; 2096 2097 static const struct eqos_config eqos_tegra186_config = { 2098 .reg_access_always_ok = false, 2099 .mdio_wait = 10, 2100 .swr_wait = 10, 2101 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2102 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2103 .interface = eqos_get_interface_tegra186, 2104 .ops = &eqos_tegra186_ops 2105 }; 2106 2107 static struct eqos_ops eqos_stm32_ops = { 2108 .eqos_inval_desc = eqos_inval_desc_generic, 2109 .eqos_flush_desc = eqos_flush_desc_generic, 2110 .eqos_inval_buffer = eqos_inval_buffer_generic, 2111 .eqos_flush_buffer = eqos_flush_buffer_generic, 2112 .eqos_probe_resources = eqos_probe_resources_stm32, 2113 .eqos_remove_resources = 
eqos_remove_resources_stm32, 2114 .eqos_stop_resets = eqos_stop_resets_stm32, 2115 .eqos_start_resets = eqos_start_resets_stm32, 2116 .eqos_stop_clks = eqos_stop_clks_stm32, 2117 .eqos_start_clks = eqos_start_clks_stm32, 2118 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2119 .eqos_disable_calibration = eqos_disable_calibration_stm32, 2120 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2121 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32 2122 }; 2123 2124 static const struct eqos_config eqos_stm32_config = { 2125 .reg_access_always_ok = false, 2126 .mdio_wait = 10000, 2127 .swr_wait = 50, 2128 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2129 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2130 .interface = eqos_get_interface_stm32, 2131 .ops = &eqos_stm32_ops 2132 }; 2133 2134 static struct eqos_ops eqos_imx_ops = { 2135 .eqos_inval_desc = eqos_inval_desc_generic, 2136 .eqos_flush_desc = eqos_flush_desc_generic, 2137 .eqos_inval_buffer = eqos_inval_buffer_generic, 2138 .eqos_flush_buffer = eqos_flush_buffer_generic, 2139 .eqos_probe_resources = eqos_probe_resources_imx, 2140 .eqos_remove_resources = eqos_remove_resources_imx, 2141 .eqos_stop_resets = eqos_stop_resets_imx, 2142 .eqos_start_resets = eqos_start_resets_imx, 2143 .eqos_stop_clks = eqos_stop_clks_imx, 2144 .eqos_start_clks = eqos_start_clks_imx, 2145 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2146 .eqos_disable_calibration = eqos_disable_calibration_imx, 2147 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2148 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx 2149 }; 2150 2151 struct eqos_config eqos_imx_config = { 2152 .reg_access_always_ok = false, 2153 .mdio_wait = 10000, 2154 .swr_wait = 50, 2155 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2156 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2157 .interface = eqos_get_interface_imx, 2158 .ops = &eqos_imx_ops 2159 }; 2160 2161 static const struct udevice_id eqos_ids[] = { 2162 { 2163 
.compatible = "nvidia,tegra186-eqos", 2164 .data = (ulong)&eqos_tegra186_config 2165 }, 2166 { 2167 .compatible = "snps,dwmac-4.20a", 2168 .data = (ulong)&eqos_stm32_config 2169 }, 2170 { 2171 .compatible = "fsl,imx-eqos", 2172 .data = (ulong)&eqos_imx_config 2173 }, 2174 2175 { } 2176 }; 2177 2178 U_BOOT_DRIVER(eth_eqos) = { 2179 .name = "eth_eqos", 2180 .id = UCLASS_ETH, 2181 .of_match = of_match_ptr(eqos_ids), 2182 .probe = eqos_probe, 2183 .remove = eqos_remove, 2184 .ops = &eqos_ops, 2185 .priv_auto_alloc_size = sizeof(struct eqos_priv), 2186 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 2187 }; 2188