/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
29 */ 30 #include <common.h> 31 #include <clk.h> 32 #include <dm.h> 33 #include <errno.h> 34 #include <memalign.h> 35 #include <miiphy.h> 36 #include <net.h> 37 #include <netdev.h> 38 #include <phy.h> 39 #include <reset.h> 40 #include <wait_bit.h> 41 #include <asm/gpio.h> 42 #include <asm/io.h> 43 #include <eth_phy.h> 44 45 /* Core registers */ 46 47 #define EQOS_MAC_REGS_BASE 0x000 48 struct eqos_mac_regs { 49 uint32_t configuration; /* 0x000 */ 50 uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */ 51 uint32_t q0_tx_flow_ctrl; /* 0x070 */ 52 uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */ 53 uint32_t rx_flow_ctrl; /* 0x090 */ 54 uint32_t unused_094; /* 0x094 */ 55 uint32_t txq_prty_map0; /* 0x098 */ 56 uint32_t unused_09c; /* 0x09c */ 57 uint32_t rxq_ctrl0; /* 0x0a0 */ 58 uint32_t unused_0a4; /* 0x0a4 */ 59 uint32_t rxq_ctrl2; /* 0x0a8 */ 60 uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */ 61 uint32_t us_tic_counter; /* 0x0dc */ 62 uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */ 63 uint32_t hw_feature0; /* 0x11c */ 64 uint32_t hw_feature1; /* 0x120 */ 65 uint32_t hw_feature2; /* 0x124 */ 66 uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */ 67 uint32_t mdio_address; /* 0x200 */ 68 uint32_t mdio_data; /* 0x204 */ 69 uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */ 70 uint32_t address0_high; /* 0x300 */ 71 uint32_t address0_low; /* 0x304 */ 72 }; 73 74 #define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23) 75 #define EQOS_MAC_CONFIGURATION_CST BIT(21) 76 #define EQOS_MAC_CONFIGURATION_ACS BIT(20) 77 #define EQOS_MAC_CONFIGURATION_WD BIT(19) 78 #define EQOS_MAC_CONFIGURATION_JD BIT(17) 79 #define EQOS_MAC_CONFIGURATION_JE BIT(16) 80 #define EQOS_MAC_CONFIGURATION_PS BIT(15) 81 #define EQOS_MAC_CONFIGURATION_FES BIT(14) 82 #define EQOS_MAC_CONFIGURATION_DM BIT(13) 83 #define EQOS_MAC_CONFIGURATION_LM BIT(12) 84 #define EQOS_MAC_CONFIGURATION_TE BIT(1) 85 #define EQOS_MAC_CONFIGURATION_RE BIT(0) 86 87 #define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16 88 
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff 89 #define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1) 90 91 #define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) 92 93 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0 94 #define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff 95 96 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0 97 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3 98 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0 99 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2 100 #define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1 101 102 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0 103 #define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff 104 105 #define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8 106 #define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2 107 #define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1 108 #define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0 109 110 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6 111 #define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f 112 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0 113 #define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f 114 115 #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 116 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 117 118 #define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 119 #define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 120 #define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 121 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 122 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 123 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) 124 #define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 125 #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 126 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 127 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) 128 #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) 129 130 #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff 131 132 #define EQOS_MTL_REGS_BASE 0xd00 133 struct eqos_mtl_regs { 134 uint32_t txq0_operation_mode; /* 0xd00 */ 135 uint32_t unused_d04; /* 0xd04 */ 136 uint32_t txq0_debug; /* 0xd08 */ 137 uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */ 138 uint32_t txq0_quantum_weight; /* 0xd18 */ 139 uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */ 
140 uint32_t rxq0_operation_mode; /* 0xd30 */ 141 uint32_t unused_d34; /* 0xd34 */ 142 uint32_t rxq0_debug; /* 0xd38 */ 143 }; 144 145 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16 146 #define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff 147 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2 148 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3 149 #define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2 150 #define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1) 151 #define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0) 152 153 #define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4) 154 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1 155 #define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3 156 157 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20 158 #define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff 159 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14 160 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f 161 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8 162 #define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f 163 #define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7) 164 #define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5) 165 #define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4) 166 #define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3) 167 168 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16 169 #define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff 170 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4 171 #define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3 172 173 #define EQOS_DMA_REGS_BASE 0x1000 174 struct eqos_dma_regs { 175 uint32_t mode; /* 0x1000 */ 176 uint32_t sysbus_mode; /* 0x1004 */ 177 uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */ 178 uint32_t ch0_control; /* 0x1100 */ 179 uint32_t ch0_tx_control; /* 0x1104 */ 180 uint32_t ch0_rx_control; /* 0x1108 */ 181 uint32_t unused_110c; /* 0x110c */ 182 uint32_t ch0_txdesc_list_haddress; /* 0x1110 */ 183 uint32_t ch0_txdesc_list_address; /* 0x1114 */ 184 uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */ 185 uint32_t ch0_rxdesc_list_address; /* 0x111c */ 186 uint32_t ch0_txdesc_tail_pointer; /* 
0x1120 */ 187 uint32_t unused_1124; /* 0x1124 */ 188 uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */ 189 uint32_t ch0_txdesc_ring_length; /* 0x112c */ 190 uint32_t ch0_rxdesc_ring_length; /* 0x1130 */ 191 }; 192 193 #define EQOS_DMA_MODE_SWR BIT(0) 194 195 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16 196 #define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf 197 #define EQOS_DMA_SYSBUS_MODE_EAME BIT(11) 198 #define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3) 199 #define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2) 200 #define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1) 201 202 #define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16) 203 204 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16 205 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f 206 #define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4) 207 #define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0) 208 209 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16 210 #define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f 211 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1 212 #define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff 213 #define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0) 214 215 /* These registers are Tegra186-specific */ 216 #define EQOS_TEGRA186_REGS_BASE 0x8800 217 struct eqos_tegra186_regs { 218 uint32_t sdmemcomppadctrl; /* 0x8800 */ 219 uint32_t auto_cal_config; /* 0x8804 */ 220 uint32_t unused_8808; /* 0x8808 */ 221 uint32_t auto_cal_status; /* 0x880c */ 222 }; 223 224 #define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) 225 226 #define EQOS_AUTO_CAL_CONFIG_START BIT(31) 227 #define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29) 228 229 #define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31) 230 231 /* Descriptors */ 232 233 #define EQOS_DESCRIPTOR_WORDS 4 234 #define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4) 235 /* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */ 236 #define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN 237 #define EQOS_DESCRIPTORS_TX 4 238 #define EQOS_DESCRIPTORS_RX 4 239 #define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX) 240 #define EQOS_DESCRIPTORS_SIZE 
ALIGN(EQOS_DESCRIPTORS_NUM * \ 241 EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN) 242 #define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN 243 #define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN) 244 #define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE) 245 246 /* 247 * Warn if the cache-line size is larger than the descriptor size. In such 248 * cases the driver will likely fail because the CPU needs to flush the cache 249 * when requeuing RX buffers, therefore descriptors written by the hardware 250 * may be discarded. Architectures with full IO coherence, such as x86, do not 251 * experience this issue, and hence are excluded from this condition. 252 * 253 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause 254 * the driver to allocate descriptors from a pool of non-cached memory. 255 */ 256 #if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN 257 #if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \ 258 !defined(CONFIG_SYS_DCACHE_OFF) && !defined(CONFIG_X86) 259 #warning Cache line size is larger than descriptor size 260 #endif 261 #endif 262 263 struct eqos_desc { 264 u32 des0; 265 u32 des1; 266 u32 des2; 267 u32 des3; 268 }; 269 270 #define EQOS_DESC3_OWN BIT(31) 271 #define EQOS_DESC3_FD BIT(29) 272 #define EQOS_DESC3_LD BIT(28) 273 #define EQOS_DESC3_BUF1V BIT(24) 274 275 struct eqos_config { 276 bool reg_access_always_ok; 277 int mdio_wait; 278 int swr_wait; 279 int config_mac; 280 int config_mac_mdio; 281 phy_interface_t (*interface)(struct udevice *dev); 282 struct eqos_ops *ops; 283 }; 284 285 struct eqos_ops { 286 void (*eqos_inval_desc)(void *desc); 287 void (*eqos_flush_desc)(void *desc); 288 void (*eqos_inval_buffer)(void *buf, size_t size); 289 void (*eqos_flush_buffer)(void *buf, size_t size); 290 int (*eqos_probe_resources)(struct udevice *dev); 291 int (*eqos_remove_resources)(struct udevice *dev); 292 int (*eqos_stop_resets)(struct udevice *dev); 293 int (*eqos_start_resets)(struct udevice *dev); 294 void 
(*eqos_stop_clks)(struct udevice *dev); 295 int (*eqos_start_clks)(struct udevice *dev); 296 int (*eqos_calibrate_pads)(struct udevice *dev); 297 int (*eqos_disable_calibration)(struct udevice *dev); 298 int (*eqos_set_tx_clk_speed)(struct udevice *dev); 299 ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); 300 }; 301 302 struct eqos_priv { 303 struct udevice *dev; 304 const struct eqos_config *config; 305 fdt_addr_t regs; 306 struct eqos_mac_regs *mac_regs; 307 struct eqos_mtl_regs *mtl_regs; 308 struct eqos_dma_regs *dma_regs; 309 struct eqos_tegra186_regs *tegra186_regs; 310 struct reset_ctl reset_ctl; 311 struct gpio_desc phy_reset_gpio; 312 struct clk clk_master_bus; 313 struct clk clk_rx; 314 struct clk clk_ptp_ref; 315 struct clk clk_tx; 316 struct clk clk_ck; 317 struct clk clk_slave_bus; 318 struct mii_dev *mii; 319 struct phy_device *phy; 320 int phyaddr; 321 u32 max_speed; 322 void *descs; 323 struct eqos_desc *tx_descs; 324 struct eqos_desc *rx_descs; 325 int tx_desc_idx, rx_desc_idx; 326 void *tx_dma_buf; 327 void *rx_dma_buf; 328 void *rx_pkt; 329 bool started; 330 bool reg_access_ok; 331 }; 332 333 /* 334 * TX and RX descriptors are 16 bytes. This causes problems with the cache 335 * maintenance on CPUs where the cache-line size exceeds the size of these 336 * descriptors. What will happen is that when the driver receives a packet 337 * it will be immediately requeued for the hardware to reuse. The CPU will 338 * therefore need to flush the cache-line containing the descriptor, which 339 * will cause all other descriptors in the same cache-line to be flushed 340 * along with it. If one of those descriptors had been written to by the 341 * device those changes (and the associated packet) will be lost. 342 * 343 * To work around this, we make use of non-cached memory if available. If 344 * descriptors are mapped uncached there's no need to manually flush them 345 * or invalidate them. 346 * 347 * Note that this only applies to descriptors. 
The packet data buffers do 348 * not have the same constraints since they are 1536 bytes large, so they 349 * are unlikely to share cache-lines. 350 */ 351 static void *eqos_alloc_descs(unsigned int num) 352 { 353 #ifdef CONFIG_SYS_NONCACHED_MEMORY 354 return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE, 355 EQOS_DESCRIPTOR_ALIGN); 356 #else 357 return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE); 358 #endif 359 } 360 361 static void eqos_free_descs(void *descs) 362 { 363 #ifdef CONFIG_SYS_NONCACHED_MEMORY 364 /* FIXME: noncached_alloc() has no opposite */ 365 #else 366 free(descs); 367 #endif 368 } 369 370 static void eqos_inval_desc_tegra186(void *desc) 371 { 372 #ifndef CONFIG_SYS_NONCACHED_MEMORY 373 unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); 374 unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE, 375 ARCH_DMA_MINALIGN); 376 377 invalidate_dcache_range(start, end); 378 #endif 379 } 380 381 static void eqos_inval_desc_generic(void *desc) 382 { 383 #ifndef CONFIG_SYS_NONCACHED_MEMORY 384 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 385 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 386 ARCH_DMA_MINALIGN); 387 388 invalidate_dcache_range(start, end); 389 #endif 390 } 391 392 static void eqos_flush_desc_tegra186(void *desc) 393 { 394 #ifndef CONFIG_SYS_NONCACHED_MEMORY 395 flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE); 396 #endif 397 } 398 399 static void eqos_flush_desc_generic(void *desc) 400 { 401 #ifndef CONFIG_SYS_NONCACHED_MEMORY 402 unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN); 403 unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE, 404 ARCH_DMA_MINALIGN); 405 406 flush_dcache_range(start, end); 407 #endif 408 } 409 410 static void eqos_inval_buffer_tegra186(void *buf, size_t size) 411 { 412 unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); 413 unsigned long end = ALIGN(start + size, 
ARCH_DMA_MINALIGN); 414 415 invalidate_dcache_range(start, end); 416 } 417 418 static void eqos_inval_buffer_generic(void *buf, size_t size) 419 { 420 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 421 unsigned long end = roundup((unsigned long)buf + size, 422 ARCH_DMA_MINALIGN); 423 424 invalidate_dcache_range(start, end); 425 } 426 427 static void eqos_flush_buffer_tegra186(void *buf, size_t size) 428 { 429 flush_cache((unsigned long)buf, size); 430 } 431 432 static void eqos_flush_buffer_generic(void *buf, size_t size) 433 { 434 unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN); 435 unsigned long end = roundup((unsigned long)buf + size, 436 ARCH_DMA_MINALIGN); 437 438 flush_dcache_range(start, end); 439 } 440 441 static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 442 { 443 return wait_for_bit_le32(&eqos->mac_regs->mdio_address, 444 EQOS_MAC_MDIO_ADDRESS_GB, false, 445 1000000, true); 446 } 447 448 static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, 449 int mdio_reg) 450 { 451 struct eqos_priv *eqos = bus->priv; 452 u32 val; 453 int ret; 454 455 debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr, 456 mdio_reg); 457 458 ret = eqos_mdio_wait_idle(eqos); 459 if (ret) { 460 pr_err("MDIO not idle at entry"); 461 return ret; 462 } 463 464 val = readl(&eqos->mac_regs->mdio_address); 465 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 466 EQOS_MAC_MDIO_ADDRESS_C45E; 467 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 468 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 469 (eqos->config->config_mac_mdio << 470 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 471 (EQOS_MAC_MDIO_ADDRESS_GOC_READ << 472 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 473 EQOS_MAC_MDIO_ADDRESS_GB; 474 writel(val, &eqos->mac_regs->mdio_address); 475 476 udelay(eqos->config->mdio_wait); 477 478 ret = eqos_mdio_wait_idle(eqos); 479 if (ret) { 480 pr_err("MDIO read didn't complete"); 481 return ret; 482 } 483 484 val = 
readl(&eqos->mac_regs->mdio_data); 485 val &= EQOS_MAC_MDIO_DATA_GD_MASK; 486 487 debug("%s: val=%x\n", __func__, val); 488 489 return val; 490 } 491 492 static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, 493 int mdio_reg, u16 mdio_val) 494 { 495 struct eqos_priv *eqos = bus->priv; 496 u32 val; 497 int ret; 498 499 debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, 500 mdio_addr, mdio_reg, mdio_val); 501 502 ret = eqos_mdio_wait_idle(eqos); 503 if (ret) { 504 pr_err("MDIO not idle at entry"); 505 return ret; 506 } 507 508 writel(mdio_val, &eqos->mac_regs->mdio_data); 509 510 val = readl(&eqos->mac_regs->mdio_address); 511 val &= EQOS_MAC_MDIO_ADDRESS_SKAP | 512 EQOS_MAC_MDIO_ADDRESS_C45E; 513 val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | 514 (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | 515 (eqos->config->config_mac_mdio << 516 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | 517 (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << 518 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | 519 EQOS_MAC_MDIO_ADDRESS_GB; 520 writel(val, &eqos->mac_regs->mdio_address); 521 522 udelay(eqos->config->mdio_wait); 523 524 ret = eqos_mdio_wait_idle(eqos); 525 if (ret) { 526 pr_err("MDIO read didn't complete"); 527 return ret; 528 } 529 530 return 0; 531 } 532 533 static int eqos_start_clks_tegra186(struct udevice *dev) 534 { 535 #ifdef CONFIG_CLK 536 struct eqos_priv *eqos = dev_get_priv(dev); 537 int ret; 538 539 debug("%s(dev=%p):\n", __func__, dev); 540 541 ret = clk_enable(&eqos->clk_slave_bus); 542 if (ret < 0) { 543 pr_err("clk_enable(clk_slave_bus) failed: %d", ret); 544 goto err; 545 } 546 547 ret = clk_enable(&eqos->clk_master_bus); 548 if (ret < 0) { 549 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 550 goto err_disable_clk_slave_bus; 551 } 552 553 ret = clk_enable(&eqos->clk_rx); 554 if (ret < 0) { 555 pr_err("clk_enable(clk_rx) failed: %d", ret); 556 goto err_disable_clk_master_bus; 557 } 558 559 ret = clk_enable(&eqos->clk_ptp_ref); 560 if (ret < 
0) { 561 pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); 562 goto err_disable_clk_rx; 563 } 564 565 ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); 566 if (ret < 0) { 567 pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); 568 goto err_disable_clk_ptp_ref; 569 } 570 571 ret = clk_enable(&eqos->clk_tx); 572 if (ret < 0) { 573 pr_err("clk_enable(clk_tx) failed: %d", ret); 574 goto err_disable_clk_ptp_ref; 575 } 576 #endif 577 578 debug("%s: OK\n", __func__); 579 return 0; 580 581 #ifdef CONFIG_CLK 582 err_disable_clk_ptp_ref: 583 clk_disable(&eqos->clk_ptp_ref); 584 err_disable_clk_rx: 585 clk_disable(&eqos->clk_rx); 586 err_disable_clk_master_bus: 587 clk_disable(&eqos->clk_master_bus); 588 err_disable_clk_slave_bus: 589 clk_disable(&eqos->clk_slave_bus); 590 err: 591 debug("%s: FAILED: %d\n", __func__, ret); 592 return ret; 593 #endif 594 } 595 596 static int eqos_start_clks_stm32(struct udevice *dev) 597 { 598 #ifdef CONFIG_CLK 599 struct eqos_priv *eqos = dev_get_priv(dev); 600 int ret; 601 602 debug("%s(dev=%p):\n", __func__, dev); 603 604 ret = clk_enable(&eqos->clk_master_bus); 605 if (ret < 0) { 606 pr_err("clk_enable(clk_master_bus) failed: %d", ret); 607 goto err; 608 } 609 610 ret = clk_enable(&eqos->clk_rx); 611 if (ret < 0) { 612 pr_err("clk_enable(clk_rx) failed: %d", ret); 613 goto err_disable_clk_master_bus; 614 } 615 616 ret = clk_enable(&eqos->clk_tx); 617 if (ret < 0) { 618 pr_err("clk_enable(clk_tx) failed: %d", ret); 619 goto err_disable_clk_rx; 620 } 621 622 if (clk_valid(&eqos->clk_ck)) { 623 ret = clk_enable(&eqos->clk_ck); 624 if (ret < 0) { 625 pr_err("clk_enable(clk_ck) failed: %d", ret); 626 goto err_disable_clk_tx; 627 } 628 } 629 #endif 630 631 debug("%s: OK\n", __func__); 632 return 0; 633 634 #ifdef CONFIG_CLK 635 err_disable_clk_tx: 636 clk_disable(&eqos->clk_tx); 637 err_disable_clk_rx: 638 clk_disable(&eqos->clk_rx); 639 err_disable_clk_master_bus: 640 clk_disable(&eqos->clk_master_bus); 641 err: 642 debug("%s: 
FAILED: %d\n", __func__, ret); 643 return ret; 644 #endif 645 } 646 647 static int eqos_start_clks_imx(struct udevice *dev) 648 { 649 return 0; 650 } 651 652 static void eqos_stop_clks_tegra186(struct udevice *dev) 653 { 654 #ifdef CONFIG_CLK 655 struct eqos_priv *eqos = dev_get_priv(dev); 656 657 debug("%s(dev=%p):\n", __func__, dev); 658 659 clk_disable(&eqos->clk_tx); 660 clk_disable(&eqos->clk_ptp_ref); 661 clk_disable(&eqos->clk_rx); 662 clk_disable(&eqos->clk_master_bus); 663 clk_disable(&eqos->clk_slave_bus); 664 #endif 665 666 debug("%s: OK\n", __func__); 667 } 668 669 static void eqos_stop_clks_stm32(struct udevice *dev) 670 { 671 #ifdef CONFIG_CLK 672 struct eqos_priv *eqos = dev_get_priv(dev); 673 674 debug("%s(dev=%p):\n", __func__, dev); 675 676 clk_disable(&eqos->clk_tx); 677 clk_disable(&eqos->clk_rx); 678 clk_disable(&eqos->clk_master_bus); 679 if (clk_valid(&eqos->clk_ck)) 680 clk_disable(&eqos->clk_ck); 681 #endif 682 683 debug("%s: OK\n", __func__); 684 } 685 686 static void eqos_stop_clks_imx(struct udevice *dev) 687 { 688 /* empty */ 689 } 690 691 static int eqos_start_resets_tegra186(struct udevice *dev) 692 { 693 struct eqos_priv *eqos = dev_get_priv(dev); 694 int ret; 695 696 debug("%s(dev=%p):\n", __func__, dev); 697 698 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 699 if (ret < 0) { 700 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); 701 return ret; 702 } 703 704 udelay(2); 705 706 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 707 if (ret < 0) { 708 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); 709 return ret; 710 } 711 712 ret = reset_assert(&eqos->reset_ctl); 713 if (ret < 0) { 714 pr_err("reset_assert() failed: %d", ret); 715 return ret; 716 } 717 718 udelay(2); 719 720 ret = reset_deassert(&eqos->reset_ctl); 721 if (ret < 0) { 722 pr_err("reset_deassert() failed: %d", ret); 723 return ret; 724 } 725 726 debug("%s: OK\n", __func__); 727 return 0; 728 } 729 730 static int 
eqos_start_resets_stm32(struct udevice *dev) 731 { 732 struct eqos_priv *eqos = dev_get_priv(dev); 733 int ret; 734 735 debug("%s(dev=%p):\n", __func__, dev); 736 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 737 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 738 if (ret < 0) { 739 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 740 ret); 741 return ret; 742 } 743 744 udelay(2); 745 746 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); 747 if (ret < 0) { 748 pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", 749 ret); 750 return ret; 751 } 752 } 753 debug("%s: OK\n", __func__); 754 755 return 0; 756 } 757 758 static int eqos_start_resets_imx(struct udevice *dev) 759 { 760 return 0; 761 } 762 763 static int eqos_stop_resets_tegra186(struct udevice *dev) 764 { 765 struct eqos_priv *eqos = dev_get_priv(dev); 766 767 reset_assert(&eqos->reset_ctl); 768 dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 769 770 return 0; 771 } 772 773 static int eqos_stop_resets_stm32(struct udevice *dev) 774 { 775 struct eqos_priv *eqos = dev_get_priv(dev); 776 int ret; 777 778 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) { 779 ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); 780 if (ret < 0) { 781 pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", 782 ret); 783 return ret; 784 } 785 } 786 787 return 0; 788 } 789 790 static int eqos_stop_resets_imx(struct udevice *dev) 791 { 792 return 0; 793 } 794 795 static int eqos_calibrate_pads_tegra186(struct udevice *dev) 796 { 797 struct eqos_priv *eqos = dev_get_priv(dev); 798 int ret; 799 800 debug("%s(dev=%p):\n", __func__, dev); 801 802 setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 803 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 804 805 udelay(1); 806 807 setbits_le32(&eqos->tegra186_regs->auto_cal_config, 808 EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE); 809 810 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 811 EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); 812 if (ret) { 
813 pr_err("calibrate didn't start"); 814 goto failed; 815 } 816 817 ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, 818 EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); 819 if (ret) { 820 pr_err("calibrate didn't finish"); 821 goto failed; 822 } 823 824 ret = 0; 825 826 failed: 827 clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl, 828 EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD); 829 830 debug("%s: returns %d\n", __func__, ret); 831 832 return ret; 833 } 834 835 static int eqos_disable_calibration_tegra186(struct udevice *dev) 836 { 837 struct eqos_priv *eqos = dev_get_priv(dev); 838 839 debug("%s(dev=%p):\n", __func__, dev); 840 841 clrbits_le32(&eqos->tegra186_regs->auto_cal_config, 842 EQOS_AUTO_CAL_CONFIG_ENABLE); 843 844 return 0; 845 } 846 847 static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev) 848 { 849 #ifdef CONFIG_CLK 850 struct eqos_priv *eqos = dev_get_priv(dev); 851 852 return clk_get_rate(&eqos->clk_slave_bus); 853 #else 854 return 0; 855 #endif 856 } 857 858 static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev) 859 { 860 #ifdef CONFIG_CLK 861 struct eqos_priv *eqos = dev_get_priv(dev); 862 863 return clk_get_rate(&eqos->clk_master_bus); 864 #else 865 return 0; 866 #endif 867 } 868 869 static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev) 870 { 871 /* TODO: retrieve from CSR clock */ 872 return 100 * 1000000; 873 } 874 875 static int eqos_calibrate_pads_stm32(struct udevice *dev) 876 { 877 return 0; 878 } 879 880 static int eqos_calibrate_pads_imx(struct udevice *dev) 881 { 882 return 0; 883 } 884 885 static int eqos_disable_calibration_stm32(struct udevice *dev) 886 { 887 return 0; 888 } 889 890 static int eqos_disable_calibration_imx(struct udevice *dev) 891 { 892 return 0; 893 } 894 895 static int eqos_set_full_duplex(struct udevice *dev) 896 { 897 struct eqos_priv *eqos = dev_get_priv(dev); 898 899 debug("%s(dev=%p):\n", __func__, dev); 900 901 setbits_le32(&eqos->mac_regs->configuration, 
EQOS_MAC_CONFIGURATION_DM); 902 903 return 0; 904 } 905 906 static int eqos_set_half_duplex(struct udevice *dev) 907 { 908 struct eqos_priv *eqos = dev_get_priv(dev); 909 910 debug("%s(dev=%p):\n", __func__, dev); 911 912 clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM); 913 914 /* WAR: Flush TX queue when switching to half-duplex */ 915 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 916 EQOS_MTL_TXQ0_OPERATION_MODE_FTQ); 917 918 return 0; 919 } 920 921 static int eqos_set_gmii_speed(struct udevice *dev) 922 { 923 struct eqos_priv *eqos = dev_get_priv(dev); 924 925 debug("%s(dev=%p):\n", __func__, dev); 926 927 clrbits_le32(&eqos->mac_regs->configuration, 928 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 929 930 return 0; 931 } 932 933 static int eqos_set_mii_speed_100(struct udevice *dev) 934 { 935 struct eqos_priv *eqos = dev_get_priv(dev); 936 937 debug("%s(dev=%p):\n", __func__, dev); 938 939 setbits_le32(&eqos->mac_regs->configuration, 940 EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES); 941 942 return 0; 943 } 944 945 static int eqos_set_mii_speed_10(struct udevice *dev) 946 { 947 struct eqos_priv *eqos = dev_get_priv(dev); 948 949 debug("%s(dev=%p):\n", __func__, dev); 950 951 clrsetbits_le32(&eqos->mac_regs->configuration, 952 EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS); 953 954 return 0; 955 } 956 957 static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) 958 { 959 #ifdef CONFIG_CLK 960 struct eqos_priv *eqos = dev_get_priv(dev); 961 ulong rate; 962 int ret; 963 964 debug("%s(dev=%p):\n", __func__, dev); 965 966 switch (eqos->phy->speed) { 967 case SPEED_1000: 968 rate = 125 * 1000 * 1000; 969 break; 970 case SPEED_100: 971 rate = 25 * 1000 * 1000; 972 break; 973 case SPEED_10: 974 rate = 2.5 * 1000 * 1000; 975 break; 976 default: 977 pr_err("invalid speed %d", eqos->phy->speed); 978 return -EINVAL; 979 } 980 981 ret = clk_set_rate(&eqos->clk_tx, rate); 982 if (ret < 0) { 983 
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

/* STM32: TX clock speed is managed by the SoC glue; nothing to do here. */
static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

/* i.MX: TX clock speed is managed by SoC-level code; nothing to do here. */
static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	return 0;
}

/*
 * Re-program MAC duplex/speed settings, pad calibration, and the TX clock
 * to match the link parameters negotiated by the PHY.
 *
 * Must be called after phy_startup() has established a link, since it reads
 * eqos->phy->duplex and eqos->phy->speed.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	/* Pad calibration is only required at 100M and above */
	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d",
			       ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d",
			       ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

/*
 * Program the MAC address from platform data into the MAC address-0
 * filter registers (high word first, then low word).
 *
 * Returns 0 always; silently does nothing if registers are inaccessible.
 */
static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
		(plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
		(plat->enetaddr[2] << 16) |
		(plat->enetaddr[1] << 8) |
		(plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

/*
 * Bring the interface up: start clocks and deassert resets, wait for the DMA
 * soft reset to complete, connect/configure/start the PHY, then program the
 * MTL, MAC, and DMA blocks and hand the RX descriptor ring to the HW.
 *
 * Returns 0 on success, negative error code on failure (with all resources
 * acquired so far released via the error-unwind labels at the end).
 */
static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	/* Clocks running and resets released: registers are now safe to touch */
	eqos->reg_access_ok = true;

	/* Wait for the DMA soft reset (self-clearing) to complete */
	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	/* Program the 1us tick counter from the CSR clock rate */
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * if PHY was already connected and configured,
	 * don't need to reconnect/reconfigure again
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
1167 eqos->phy = phy_connect(eqos->mii, addr, dev, 1168 eqos->config->interface(dev)); 1169 if (!eqos->phy) { 1170 pr_err("phy_connect() failed"); 1171 goto err_stop_resets; 1172 } 1173 1174 if (eqos->max_speed) { 1175 ret = phy_set_supported(eqos->phy, eqos->max_speed); 1176 if (ret) { 1177 pr_err("phy_set_supported() failed: %d", ret); 1178 goto err_shutdown_phy; 1179 } 1180 } 1181 1182 ret = phy_config(eqos->phy); 1183 if (ret < 0) { 1184 pr_err("phy_config() failed: %d", ret); 1185 goto err_shutdown_phy; 1186 } 1187 } 1188 1189 ret = phy_startup(eqos->phy); 1190 if (ret < 0) { 1191 pr_err("phy_startup() failed: %d", ret); 1192 goto err_shutdown_phy; 1193 } 1194 1195 if (!eqos->phy->link) { 1196 pr_err("No link"); 1197 goto err_shutdown_phy; 1198 } 1199 1200 ret = eqos_adjust_link(dev); 1201 if (ret < 0) { 1202 pr_err("eqos_adjust_link() failed: %d", ret); 1203 goto err_shutdown_phy; 1204 } 1205 1206 /* Configure MTL */ 1207 writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100); 1208 1209 /* Enable Store and Forward mode for TX */ 1210 /* Program Tx operating mode */ 1211 setbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1212 EQOS_MTL_TXQ0_OPERATION_MODE_TSF | 1213 (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED << 1214 EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT)); 1215 1216 /* Transmit Queue weight */ 1217 writel(0x10, &eqos->mtl_regs->txq0_quantum_weight); 1218 1219 /* Enable Store and Forward mode for RX, since no jumbo frame */ 1220 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1221 EQOS_MTL_RXQ0_OPERATION_MODE_RSF | 1222 EQOS_MTL_RXQ0_OPERATION_MODE_FEP | 1223 EQOS_MTL_RXQ0_OPERATION_MODE_FUP); 1224 1225 /* Transmit/Receive queue fifo size; use all RAM for 1 queue */ 1226 val = readl(&eqos->mac_regs->hw_feature1); 1227 tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) & 1228 EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK; 1229 rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) & 1230 EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK; 1231 1232 /* 1233 * 
r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting. 1234 * r/tqs is encoded as (n / 256) - 1. 1235 */ 1236 tqs = (128 << tx_fifo_sz) / 256 - 1; 1237 rqs = (128 << rx_fifo_sz) / 256 - 1; 1238 1239 clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode, 1240 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK << 1241 EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT, 1242 tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT); 1243 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1244 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK << 1245 EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT, 1246 rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT); 1247 1248 /* Flow control used only if each channel gets 4KB or more FIFO */ 1249 if (rqs >= ((4096 / 256) - 1)) { 1250 u32 rfd, rfa; 1251 1252 setbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1253 EQOS_MTL_RXQ0_OPERATION_MODE_EHFC); 1254 1255 /* 1256 * Set Threshold for Activating Flow Contol space for min 2 1257 * frames ie, (1500 * 1) = 1500 bytes. 1258 * 1259 * Set Threshold for Deactivating Flow Contol for space of 1260 * min 1 frame (frame size 1500bytes) in receive fifo 1261 */ 1262 if (rqs == ((4096 / 256) - 1)) { 1263 /* 1264 * This violates the above formula because of FIFO size 1265 * limit therefore overflow may occur inspite of this. 
1266 */ 1267 rfd = 0x3; /* Full-3K */ 1268 rfa = 0x1; /* Full-1.5K */ 1269 } else if (rqs == ((8192 / 256) - 1)) { 1270 rfd = 0x6; /* Full-4K */ 1271 rfa = 0xa; /* Full-6K */ 1272 } else if (rqs == ((16384 / 256) - 1)) { 1273 rfd = 0x6; /* Full-4K */ 1274 rfa = 0x12; /* Full-10K */ 1275 } else { 1276 rfd = 0x6; /* Full-4K */ 1277 rfa = 0x1E; /* Full-16K */ 1278 } 1279 1280 clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode, 1281 (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK << 1282 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1283 (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK << 1284 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT), 1285 (rfd << 1286 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) | 1287 (rfa << 1288 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT)); 1289 } 1290 1291 /* Configure MAC */ 1292 1293 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1294 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1295 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1296 eqos->config->config_mac << 1297 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1298 1299 clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0, 1300 EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK << 1301 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT, 1302 0x2 << 1303 EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT); 1304 1305 /* Multicast and Broadcast Queue Enable */ 1306 setbits_le32(&eqos->mac_regs->unused_0a4, 1307 0x00100000); 1308 /* enable promise mode */ 1309 setbits_le32(&eqos->mac_regs->unused_004[1], 1310 0x1); 1311 1312 /* Set TX flow control parameters */ 1313 /* Set Pause Time */ 1314 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1315 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT); 1316 /* Assign priority for TX flow control */ 1317 clrbits_le32(&eqos->mac_regs->txq_prty_map0, 1318 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << 1319 EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT); 1320 /* Assign priority for RX flow control */ 1321 clrbits_le32(&eqos->mac_regs->rxq_ctrl2, 1322 EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK << 1323 EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT); 1324 /* Enable flow control */ 1325 setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl, 1326 
EQOS_MAC_Q0_TX_FLOW_CTRL_TFE); 1327 setbits_le32(&eqos->mac_regs->rx_flow_ctrl, 1328 EQOS_MAC_RX_FLOW_CTRL_RFE); 1329 1330 clrsetbits_le32(&eqos->mac_regs->configuration, 1331 EQOS_MAC_CONFIGURATION_GPSLCE | 1332 EQOS_MAC_CONFIGURATION_WD | 1333 EQOS_MAC_CONFIGURATION_JD | 1334 EQOS_MAC_CONFIGURATION_JE, 1335 EQOS_MAC_CONFIGURATION_CST | 1336 EQOS_MAC_CONFIGURATION_ACS); 1337 1338 eqos_write_hwaddr(dev); 1339 1340 /* Configure DMA */ 1341 1342 /* Enable OSP mode */ 1343 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1344 EQOS_DMA_CH0_TX_CONTROL_OSP); 1345 1346 /* RX buffer size. Must be a multiple of bus width */ 1347 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1348 EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK << 1349 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT, 1350 EQOS_MAX_PACKET_SIZE << 1351 EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT); 1352 1353 setbits_le32(&eqos->dma_regs->ch0_control, 1354 EQOS_DMA_CH0_CONTROL_PBLX8); 1355 1356 /* 1357 * Burst length must be < 1/2 FIFO size. 1358 * FIFO size in tqs is encoded as (n / 256) - 1. 1359 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes. 1360 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1. 
1361 */ 1362 pbl = tqs + 1; 1363 if (pbl > 32) 1364 pbl = 32; 1365 clrsetbits_le32(&eqos->dma_regs->ch0_tx_control, 1366 EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK << 1367 EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT, 1368 pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT); 1369 1370 clrsetbits_le32(&eqos->dma_regs->ch0_rx_control, 1371 EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK << 1372 EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT, 1373 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT); 1374 1375 /* DMA performance configuration */ 1376 val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) | 1377 EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 | 1378 EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4; 1379 writel(val, &eqos->dma_regs->sysbus_mode); 1380 1381 /* Set up descriptors */ 1382 1383 memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE); 1384 for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) { 1385 struct eqos_desc *rx_desc = &(eqos->rx_descs[i]); 1386 rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf + 1387 (i * EQOS_MAX_PACKET_SIZE)); 1388 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1389 mb(); 1390 eqos->config->ops->eqos_flush_desc(rx_desc); 1391 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf + 1392 (i * EQOS_MAX_PACKET_SIZE), 1393 EQOS_MAX_PACKET_SIZE); 1394 } 1395 1396 writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress); 1397 writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address); 1398 writel(EQOS_DESCRIPTORS_TX - 1, 1399 &eqos->dma_regs->ch0_txdesc_ring_length); 1400 1401 writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress); 1402 writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address); 1403 writel(EQOS_DESCRIPTORS_RX - 1, 1404 &eqos->dma_regs->ch0_rxdesc_ring_length); 1405 1406 /* Enable everything */ 1407 setbits_le32(&eqos->dma_regs->ch0_tx_control, 1408 EQOS_DMA_CH0_TX_CONTROL_ST); 1409 setbits_le32(&eqos->dma_regs->ch0_rx_control, 1410 EQOS_DMA_CH0_RX_CONTROL_SR); 1411 setbits_le32(&eqos->mac_regs->configuration, 1412 EQOS_MAC_CONFIGURATION_TE | 
EQOS_MAC_CONFIGURATION_RE); 1413 1414 /* TX tail pointer not written until we need to TX a packet */ 1415 /* 1416 * Point RX tail pointer at last descriptor. Ideally, we'd point at the 1417 * first descriptor, implying all descriptors were available. However, 1418 * that's not distinguishable from none of the descriptors being 1419 * available. 1420 */ 1421 last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]); 1422 writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1423 1424 eqos->started = true; 1425 1426 debug("%s: OK\n", __func__); 1427 return 0; 1428 1429 err_shutdown_phy: 1430 phy_shutdown(eqos->phy); 1431 err_stop_resets: 1432 eqos->config->ops->eqos_stop_resets(dev); 1433 err_stop_clks: 1434 eqos->config->ops->eqos_stop_clks(dev); 1435 err: 1436 pr_err("FAILED: %d", ret); 1437 return ret; 1438 } 1439 1440 static void eqos_stop(struct udevice *dev) 1441 { 1442 struct eqos_priv *eqos = dev_get_priv(dev); 1443 int i; 1444 1445 debug("%s(dev=%p):\n", __func__, dev); 1446 1447 if (!eqos->started) 1448 return; 1449 eqos->started = false; 1450 eqos->reg_access_ok = false; 1451 1452 /* Disable TX DMA */ 1453 clrbits_le32(&eqos->dma_regs->ch0_tx_control, 1454 EQOS_DMA_CH0_TX_CONTROL_ST); 1455 1456 /* Wait for TX all packets to drain out of MTL */ 1457 for (i = 0; i < 1000000; i++) { 1458 u32 val = readl(&eqos->mtl_regs->txq0_debug); 1459 u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) & 1460 EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK; 1461 u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS; 1462 if ((trcsts != 1) && (!txqsts)) 1463 break; 1464 } 1465 1466 /* Turn off MAC TX and RX */ 1467 clrbits_le32(&eqos->mac_regs->configuration, 1468 EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE); 1469 1470 /* Wait for all RX packets to drain out of MTL */ 1471 for (i = 0; i < 1000000; i++) { 1472 u32 val = readl(&eqos->mtl_regs->rxq0_debug); 1473 u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) & 1474 EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK; 1475 u32 rxqsts = 
(val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) & 1476 EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK; 1477 if ((!prxq) && (!rxqsts)) 1478 break; 1479 } 1480 1481 /* Turn off RX DMA */ 1482 clrbits_le32(&eqos->dma_regs->ch0_rx_control, 1483 EQOS_DMA_CH0_RX_CONTROL_SR); 1484 1485 if (eqos->phy) { 1486 phy_shutdown(eqos->phy); 1487 } 1488 eqos->config->ops->eqos_stop_resets(dev); 1489 eqos->config->ops->eqos_stop_clks(dev); 1490 1491 debug("%s: OK\n", __func__); 1492 } 1493 1494 static int eqos_send(struct udevice *dev, void *packet, int length) 1495 { 1496 struct eqos_priv *eqos = dev_get_priv(dev); 1497 struct eqos_desc *tx_desc; 1498 int i; 1499 1500 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet, 1501 length); 1502 1503 memcpy(eqos->tx_dma_buf, packet, length); 1504 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length); 1505 1506 tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]); 1507 eqos->tx_desc_idx++; 1508 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX; 1509 1510 tx_desc->des0 = (ulong)eqos->tx_dma_buf; 1511 tx_desc->des1 = 0; 1512 tx_desc->des2 = length; 1513 /* 1514 * Make sure that if HW sees the _OWN write below, it will see all the 1515 * writes to the rest of the descriptor too. 
1516 */ 1517 mb(); 1518 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length; 1519 eqos->config->ops->eqos_flush_desc(tx_desc); 1520 1521 writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])), 1522 &eqos->dma_regs->ch0_txdesc_tail_pointer); 1523 1524 for (i = 0; i < 1000000; i++) { 1525 eqos->config->ops->eqos_inval_desc(tx_desc); 1526 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN)) 1527 return 0; 1528 udelay(1); 1529 } 1530 1531 debug("%s: TX timeout\n", __func__); 1532 1533 return -ETIMEDOUT; 1534 } 1535 1536 static int eqos_recv(struct udevice *dev, int flags, uchar **packetp) 1537 { 1538 struct eqos_priv *eqos = dev_get_priv(dev); 1539 struct eqos_desc *rx_desc; 1540 int length; 1541 1542 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags); 1543 1544 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1545 eqos->config->ops->eqos_inval_desc(rx_desc); 1546 if (rx_desc->des3 & EQOS_DESC3_OWN) { 1547 debug("%s: RX packet not available\n", __func__); 1548 return -EAGAIN; 1549 } 1550 1551 *packetp = eqos->rx_dma_buf + 1552 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1553 length = rx_desc->des3 & 0x7fff; 1554 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length); 1555 1556 eqos->config->ops->eqos_inval_buffer(*packetp, length); 1557 1558 return length; 1559 } 1560 1561 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) 1562 { 1563 struct eqos_priv *eqos = dev_get_priv(dev); 1564 uchar *packet_expected; 1565 struct eqos_desc *rx_desc; 1566 1567 debug("%s(packet=%p, length=%d)\n", __func__, packet, length); 1568 1569 packet_expected = eqos->rx_dma_buf + 1570 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE); 1571 if (packet != packet_expected) { 1572 debug("%s: Unexpected packet (expected %p)\n", __func__, 1573 packet_expected); 1574 return -EINVAL; 1575 } 1576 1577 eqos->config->ops->eqos_inval_buffer(packet, length); 1578 1579 rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]); 1580 1581 rx_desc->des0 = 0; 1582 mb(); 1583 
eqos->config->ops->eqos_flush_desc(rx_desc); 1584 eqos->config->ops->eqos_inval_buffer(packet, length); 1585 rx_desc->des0 = (u32)(ulong)packet; 1586 rx_desc->des1 = 0; 1587 rx_desc->des2 = 0; 1588 /* 1589 * Make sure that if HW sees the _OWN write below, it will see all the 1590 * writes to the rest of the descriptor too. 1591 */ 1592 mb(); 1593 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V; 1594 eqos->config->ops->eqos_flush_desc(rx_desc); 1595 1596 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer); 1597 1598 eqos->rx_desc_idx++; 1599 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX; 1600 1601 return 0; 1602 } 1603 1604 static int eqos_probe_resources_core(struct udevice *dev) 1605 { 1606 struct eqos_priv *eqos = dev_get_priv(dev); 1607 int ret; 1608 1609 debug("%s(dev=%p):\n", __func__, dev); 1610 1611 eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX + 1612 EQOS_DESCRIPTORS_RX); 1613 if (!eqos->descs) { 1614 debug("%s: eqos_alloc_descs() failed\n", __func__); 1615 ret = -ENOMEM; 1616 goto err; 1617 } 1618 eqos->tx_descs = (struct eqos_desc *)eqos->descs; 1619 eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX); 1620 debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs, 1621 eqos->rx_descs); 1622 1623 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE); 1624 if (!eqos->tx_dma_buf) { 1625 debug("%s: memalign(tx_dma_buf) failed\n", __func__); 1626 ret = -ENOMEM; 1627 goto err_free_descs; 1628 } 1629 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf); 1630 1631 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE); 1632 if (!eqos->rx_dma_buf) { 1633 debug("%s: memalign(rx_dma_buf) failed\n", __func__); 1634 ret = -ENOMEM; 1635 goto err_free_tx_dma_buf; 1636 } 1637 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf); 1638 1639 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE); 1640 if (!eqos->rx_pkt) { 1641 debug("%s: malloc(rx_pkt) failed\n", __func__); 1642 ret = -ENOMEM; 1643 goto 
err_free_rx_dma_buf; 1644 } 1645 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt); 1646 1647 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf, 1648 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX); 1649 1650 debug("%s: OK\n", __func__); 1651 return 0; 1652 1653 err_free_rx_dma_buf: 1654 free(eqos->rx_dma_buf); 1655 err_free_tx_dma_buf: 1656 free(eqos->tx_dma_buf); 1657 err_free_descs: 1658 eqos_free_descs(eqos->descs); 1659 err: 1660 1661 debug("%s: returns %d\n", __func__, ret); 1662 return ret; 1663 } 1664 1665 static int eqos_remove_resources_core(struct udevice *dev) 1666 { 1667 struct eqos_priv *eqos = dev_get_priv(dev); 1668 1669 debug("%s(dev=%p):\n", __func__, dev); 1670 1671 free(eqos->rx_pkt); 1672 free(eqos->rx_dma_buf); 1673 free(eqos->tx_dma_buf); 1674 eqos_free_descs(eqos->descs); 1675 1676 debug("%s: OK\n", __func__); 1677 return 0; 1678 } 1679 1680 static int eqos_probe_resources_tegra186(struct udevice *dev) 1681 { 1682 struct eqos_priv *eqos = dev_get_priv(dev); 1683 int ret; 1684 1685 debug("%s(dev=%p):\n", __func__, dev); 1686 1687 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); 1688 if (ret) { 1689 pr_err("reset_get_by_name(rst) failed: %d", ret); 1690 return ret; 1691 } 1692 1693 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0, 1694 &eqos->phy_reset_gpio, 1695 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); 1696 if (ret) { 1697 pr_err("gpio_request_by_name(phy reset) failed: %d", ret); 1698 goto err_free_reset_eqos; 1699 } 1700 1701 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); 1702 if (ret) { 1703 pr_err("clk_get_by_name(slave_bus) failed: %d", ret); 1704 goto err_free_gpio_phy_reset; 1705 } 1706 1707 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); 1708 if (ret) { 1709 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1710 goto err_free_clk_slave_bus; 1711 } 1712 1713 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); 1714 if (ret) { 1715 pr_err("clk_get_by_name(rx) failed: %d", ret); 1716 goto 
err_free_clk_master_bus; 1717 } 1718 1719 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); 1720 if (ret) { 1721 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); 1722 goto err_free_clk_rx; 1723 return ret; 1724 } 1725 1726 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); 1727 if (ret) { 1728 pr_err("clk_get_by_name(tx) failed: %d", ret); 1729 goto err_free_clk_ptp_ref; 1730 } 1731 1732 debug("%s: OK\n", __func__); 1733 return 0; 1734 1735 err_free_clk_ptp_ref: 1736 clk_free(&eqos->clk_ptp_ref); 1737 err_free_clk_rx: 1738 clk_free(&eqos->clk_rx); 1739 err_free_clk_master_bus: 1740 clk_free(&eqos->clk_master_bus); 1741 err_free_clk_slave_bus: 1742 clk_free(&eqos->clk_slave_bus); 1743 err_free_gpio_phy_reset: 1744 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1745 err_free_reset_eqos: 1746 reset_free(&eqos->reset_ctl); 1747 1748 debug("%s: returns %d\n", __func__, ret); 1749 return ret; 1750 } 1751 1752 /* board-specific Ethernet Interface initializations. */ 1753 __weak int board_interface_eth_init(struct udevice *dev, 1754 phy_interface_t interface_type) 1755 { 1756 return 0; 1757 } 1758 1759 static int eqos_probe_resources_stm32(struct udevice *dev) 1760 { 1761 struct eqos_priv *eqos = dev_get_priv(dev); 1762 int ret; 1763 phy_interface_t interface; 1764 struct ofnode_phandle_args phandle_args; 1765 1766 debug("%s(dev=%p):\n", __func__, dev); 1767 1768 interface = eqos->config->interface(dev); 1769 1770 if (interface == PHY_INTERFACE_MODE_NONE) { 1771 pr_err("Invalid PHY interface\n"); 1772 return -EINVAL; 1773 } 1774 1775 ret = board_interface_eth_init(dev, interface); 1776 if (ret) 1777 return -EINVAL; 1778 1779 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); 1780 1781 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); 1782 if (ret) { 1783 pr_err("clk_get_by_name(master_bus) failed: %d", ret); 1784 goto err_probe; 1785 } 1786 1787 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); 1788 if (ret) { 1789 
pr_err("clk_get_by_name(rx) failed: %d", ret); 1790 goto err_free_clk_master_bus; 1791 } 1792 1793 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); 1794 if (ret) { 1795 pr_err("clk_get_by_name(tx) failed: %d", ret); 1796 goto err_free_clk_rx; 1797 } 1798 1799 /* Get ETH_CLK clocks (optional) */ 1800 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck); 1801 if (ret) 1802 pr_warn("No phy clock provided %d", ret); 1803 1804 eqos->phyaddr = -1; 1805 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, 1806 &phandle_args); 1807 if (!ret) { 1808 /* search "reset-gpios" in phy node */ 1809 ret = gpio_request_by_name_nodev(phandle_args.node, 1810 "reset-gpios", 0, 1811 &eqos->phy_reset_gpio, 1812 GPIOD_IS_OUT | 1813 GPIOD_IS_OUT_ACTIVE); 1814 if (ret) 1815 pr_warn("gpio_request_by_name(phy reset) not provided %d", 1816 ret); 1817 1818 eqos->phyaddr = ofnode_read_u32_default(phandle_args.node, 1819 "reg", -1); 1820 } 1821 1822 debug("%s: OK\n", __func__); 1823 return 0; 1824 1825 err_free_clk_rx: 1826 clk_free(&eqos->clk_rx); 1827 err_free_clk_master_bus: 1828 clk_free(&eqos->clk_master_bus); 1829 err_probe: 1830 1831 debug("%s: returns %d\n", __func__, ret); 1832 return ret; 1833 } 1834 1835 static phy_interface_t eqos_get_interface_stm32(struct udevice *dev) 1836 { 1837 const char *phy_mode; 1838 phy_interface_t interface = PHY_INTERFACE_MODE_NONE; 1839 1840 debug("%s(dev=%p):\n", __func__, dev); 1841 1842 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", 1843 NULL); 1844 if (phy_mode) 1845 interface = phy_get_interface_by_name(phy_mode); 1846 1847 return interface; 1848 } 1849 1850 static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev) 1851 { 1852 return PHY_INTERFACE_MODE_MII; 1853 } 1854 1855 static int eqos_probe_resources_imx(struct udevice *dev) 1856 { 1857 struct eqos_priv *eqos = dev_get_priv(dev); 1858 phy_interface_t interface; 1859 1860 debug("%s(dev=%p):\n", __func__, dev); 1861 1862 interface = 
eqos->config->interface(dev); 1863 1864 if (interface == PHY_INTERFACE_MODE_NONE) { 1865 pr_err("Invalid PHY interface\n"); 1866 return -EINVAL; 1867 } 1868 1869 debug("%s: OK\n", __func__); 1870 return 0; 1871 } 1872 1873 static phy_interface_t eqos_get_interface_imx(struct udevice *dev) 1874 { 1875 return PHY_INTERFACE_MODE_RGMII; 1876 } 1877 1878 static int eqos_remove_resources_tegra186(struct udevice *dev) 1879 { 1880 struct eqos_priv *eqos = dev_get_priv(dev); 1881 1882 debug("%s(dev=%p):\n", __func__, dev); 1883 1884 #ifdef CONFIG_CLK 1885 clk_free(&eqos->clk_tx); 1886 clk_free(&eqos->clk_ptp_ref); 1887 clk_free(&eqos->clk_rx); 1888 clk_free(&eqos->clk_slave_bus); 1889 clk_free(&eqos->clk_master_bus); 1890 #endif 1891 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1892 reset_free(&eqos->reset_ctl); 1893 1894 debug("%s: OK\n", __func__); 1895 return 0; 1896 } 1897 1898 static int eqos_remove_resources_stm32(struct udevice *dev) 1899 { 1900 #ifdef CONFIG_CLK 1901 struct eqos_priv *eqos = dev_get_priv(dev); 1902 1903 debug("%s(dev=%p):\n", __func__, dev); 1904 1905 clk_free(&eqos->clk_tx); 1906 clk_free(&eqos->clk_rx); 1907 clk_free(&eqos->clk_master_bus); 1908 if (clk_valid(&eqos->clk_ck)) 1909 clk_free(&eqos->clk_ck); 1910 #endif 1911 1912 if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) 1913 dm_gpio_free(dev, &eqos->phy_reset_gpio); 1914 1915 debug("%s: OK\n", __func__); 1916 return 0; 1917 } 1918 1919 static int eqos_remove_resources_imx(struct udevice *dev) 1920 { 1921 return 0; 1922 } 1923 1924 static int eqos_probe(struct udevice *dev) 1925 { 1926 struct eqos_priv *eqos = dev_get_priv(dev); 1927 int ret; 1928 1929 debug("%s(dev=%p):\n", __func__, dev); 1930 1931 eqos->dev = dev; 1932 eqos->config = (void *)dev_get_driver_data(dev); 1933 1934 eqos->regs = devfdt_get_addr(dev); 1935 if (eqos->regs == FDT_ADDR_T_NONE) { 1936 pr_err("devfdt_get_addr() failed"); 1937 return -ENODEV; 1938 } 1939 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); 1940 
eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); 1941 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); 1942 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); 1943 1944 ret = eqos_probe_resources_core(dev); 1945 if (ret < 0) { 1946 pr_err("eqos_probe_resources_core() failed: %d", ret); 1947 return ret; 1948 } 1949 1950 ret = eqos->config->ops->eqos_probe_resources(dev); 1951 if (ret < 0) { 1952 pr_err("eqos_probe_resources() failed: %d", ret); 1953 goto err_remove_resources_core; 1954 } 1955 1956 #ifdef CONFIG_DM_ETH_PHY 1957 eqos->mii = eth_phy_get_mdio_bus(dev); 1958 #endif 1959 if (!eqos->mii) { 1960 eqos->mii = mdio_alloc(); 1961 if (!eqos->mii) { 1962 pr_err("mdio_alloc() failed"); 1963 ret = -ENOMEM; 1964 goto err_remove_resources_tegra; 1965 } 1966 eqos->mii->read = eqos_mdio_read; 1967 eqos->mii->write = eqos_mdio_write; 1968 eqos->mii->priv = eqos; 1969 strcpy(eqos->mii->name, dev->name); 1970 1971 ret = mdio_register(eqos->mii); 1972 if (ret < 0) { 1973 pr_err("mdio_register() failed: %d", ret); 1974 goto err_free_mdio; 1975 } 1976 } 1977 1978 #ifdef CONFIG_DM_ETH_PHY 1979 eth_phy_set_mdio_bus(dev, eqos->mii); 1980 #endif 1981 1982 debug("%s: OK\n", __func__); 1983 return 0; 1984 1985 err_free_mdio: 1986 mdio_free(eqos->mii); 1987 err_remove_resources_tegra: 1988 eqos->config->ops->eqos_remove_resources(dev); 1989 err_remove_resources_core: 1990 eqos_remove_resources_core(dev); 1991 1992 debug("%s: returns %d\n", __func__, ret); 1993 return ret; 1994 } 1995 1996 static int eqos_remove(struct udevice *dev) 1997 { 1998 struct eqos_priv *eqos = dev_get_priv(dev); 1999 2000 debug("%s(dev=%p):\n", __func__, dev); 2001 2002 mdio_unregister(eqos->mii); 2003 mdio_free(eqos->mii); 2004 eqos->config->ops->eqos_remove_resources(dev); 2005 2006 eqos_probe_resources_core(dev); 2007 2008 debug("%s: OK\n", __func__); 2009 return 0; 2010 } 2011 2012 static const struct eth_ops eqos_ops = { 2013 .start = eqos_start, 2014 .stop = 
eqos_stop, 2015 .send = eqos_send, 2016 .recv = eqos_recv, 2017 .free_pkt = eqos_free_pkt, 2018 .write_hwaddr = eqos_write_hwaddr, 2019 }; 2020 2021 static struct eqos_ops eqos_tegra186_ops = { 2022 .eqos_inval_desc = eqos_inval_desc_tegra186, 2023 .eqos_flush_desc = eqos_flush_desc_tegra186, 2024 .eqos_inval_buffer = eqos_inval_buffer_tegra186, 2025 .eqos_flush_buffer = eqos_flush_buffer_tegra186, 2026 .eqos_probe_resources = eqos_probe_resources_tegra186, 2027 .eqos_remove_resources = eqos_remove_resources_tegra186, 2028 .eqos_stop_resets = eqos_stop_resets_tegra186, 2029 .eqos_start_resets = eqos_start_resets_tegra186, 2030 .eqos_stop_clks = eqos_stop_clks_tegra186, 2031 .eqos_start_clks = eqos_start_clks_tegra186, 2032 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186, 2033 .eqos_disable_calibration = eqos_disable_calibration_tegra186, 2034 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186, 2035 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186 2036 }; 2037 2038 static const struct eqos_config eqos_tegra186_config = { 2039 .reg_access_always_ok = false, 2040 .mdio_wait = 10, 2041 .swr_wait = 10, 2042 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2043 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35, 2044 .interface = eqos_get_interface_tegra186, 2045 .ops = &eqos_tegra186_ops 2046 }; 2047 2048 static struct eqos_ops eqos_stm32_ops = { 2049 .eqos_inval_desc = eqos_inval_desc_generic, 2050 .eqos_flush_desc = eqos_flush_desc_generic, 2051 .eqos_inval_buffer = eqos_inval_buffer_generic, 2052 .eqos_flush_buffer = eqos_flush_buffer_generic, 2053 .eqos_probe_resources = eqos_probe_resources_stm32, 2054 .eqos_remove_resources = eqos_remove_resources_stm32, 2055 .eqos_stop_resets = eqos_stop_resets_stm32, 2056 .eqos_start_resets = eqos_start_resets_stm32, 2057 .eqos_stop_clks = eqos_stop_clks_stm32, 2058 .eqos_start_clks = eqos_start_clks_stm32, 2059 .eqos_calibrate_pads = eqos_calibrate_pads_stm32, 2060 .eqos_disable_calibration = 
eqos_disable_calibration_stm32, 2061 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32, 2062 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32 2063 }; 2064 2065 static const struct eqos_config eqos_stm32_config = { 2066 .reg_access_always_ok = false, 2067 .mdio_wait = 10000, 2068 .swr_wait = 50, 2069 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV, 2070 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2071 .interface = eqos_get_interface_stm32, 2072 .ops = &eqos_stm32_ops 2073 }; 2074 2075 static struct eqos_ops eqos_imx_ops = { 2076 .eqos_inval_desc = eqos_inval_desc_generic, 2077 .eqos_flush_desc = eqos_flush_desc_generic, 2078 .eqos_inval_buffer = eqos_inval_buffer_generic, 2079 .eqos_flush_buffer = eqos_flush_buffer_generic, 2080 .eqos_probe_resources = eqos_probe_resources_imx, 2081 .eqos_remove_resources = eqos_remove_resources_imx, 2082 .eqos_stop_resets = eqos_stop_resets_imx, 2083 .eqos_start_resets = eqos_start_resets_imx, 2084 .eqos_stop_clks = eqos_stop_clks_imx, 2085 .eqos_start_clks = eqos_start_clks_imx, 2086 .eqos_calibrate_pads = eqos_calibrate_pads_imx, 2087 .eqos_disable_calibration = eqos_disable_calibration_imx, 2088 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, 2089 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx 2090 }; 2091 2092 struct eqos_config eqos_imx_config = { 2093 .reg_access_always_ok = false, 2094 .mdio_wait = 10000, 2095 .swr_wait = 50, 2096 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, 2097 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, 2098 .interface = eqos_get_interface_imx, 2099 .ops = &eqos_imx_ops 2100 }; 2101 2102 static const struct udevice_id eqos_ids[] = { 2103 { 2104 .compatible = "nvidia,tegra186-eqos", 2105 .data = (ulong)&eqos_tegra186_config 2106 }, 2107 { 2108 .compatible = "snps,dwmac-4.20a", 2109 .data = (ulong)&eqos_stm32_config 2110 }, 2111 { 2112 .compatible = "fsl,imx-eqos", 2113 .data = (ulong)&eqos_imx_config 2114 }, 2115 2116 { } 2117 }; 2118 2119 
/* Driver-model binding: matches the compatibles in eqos_ids[] above. */
U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};