From 3881f7b32a846e44ae94ab1b066e2b081c05244e Mon Sep 17 00:00:00 2001
From: David Wu <david.wu@rock-chips.com>
Date: Thu, 03 Nov 2022 10:47:48 +0800
Subject: [PATCH] net: ethernet: stmmac: Add uio support for stmmac

Currently only a single channel is supported, and the network
interface names need to be eth0 and eth1.

Signed-off-by: David Wu <david.wu@rock-chips.com>
Change-Id: I19975b10e2ed12931edc2e8bd50c003416a1109c
---

diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 21f4074..7d1ae95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,6 +12,13 @@
 
 if STMMAC_ETH
 
+config STMMAC_UIO
+	tristate "STMMAC_UIO ethernet controller"
+	default n
+	select UIO
+	help
+	  Say M here if you want to use stmmac_uio.ko for DPDK.
+
 config STMMAC_ETHTOOL
 	bool "Ethtool feature for STMMAC"
 	default STMMAC_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index f8275ed..82adb5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_DWMAC_ROCKCHIP)	+= dwmac-rockchip.o
 dwmac-rockchip-objs		:= dwmac-rk.o
 dwmac-rockchip-$(CONFIG_DWMAC_ROCKCHIP_TOOL)	+= dwmac-rk-tool.o
+obj-$(CONFIG_STMMAC_UIO)	+= stmmac_uio.o
 obj-$(CONFIG_DWMAC_SOCFPGA)	+= dwmac-altr-socfpga.o
 obj-$(CONFIG_DWMAC_STI)		+= dwmac-sti.o
 obj-$(CONFIG_DWMAC_STM32)	+= dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e6fa3b1..6bb4f4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -47,8 +47,13 @@
 #define STMMAC_CHAN0	0	/* Always supported and default for all chips */
 
 /* These need to be power of two, and >= 4 */
+#if IS_ENABLED(CONFIG_STMMAC_UIO)
+#define DMA_TX_SIZE 1024
+#define DMA_RX_SIZE 1024
+#else
 #define DMA_TX_SIZE 512
 #define DMA_RX_SIZE 512
+#endif
 #define STMMAC_GET_ENTRY(x, size)	((x + 1) & (size - 1))
 
 #undef FRAME_FILTER_DEBUG
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index e9b04c2..787f725 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -139,6 +139,7 @@
 	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
 		 MMC_CNTRL, value);
 }
+EXPORT_SYMBOL(dwmac_mmc_ctrl);
 
 /* To mask all all interrupts.*/
 void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
@@ -147,6 +148,7 @@
 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
 }
+EXPORT_SYMBOL(dwmac_mmc_intr_all_mask);
 
 /* This reads the MAC core counters (if actaully supported).
 * by default the MMC core is programmed to reset each
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 093a223..12a5f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -302,6 +302,7 @@
 #endif
 	return 0;
 }
+EXPORT_SYMBOL(stmmac_mdio_reset);
 
 /**
  * stmmac_mdio_register
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
new file mode 100644
index 0000000..b241bd9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 ROCKCHIP
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/uio_driver.h>
+#include <linux/list.h>
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/udp.h>
+#include <net/pkt_cls.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+#include "mmc.h"
+
+#define DRIVER_NAME "rockchip_gmac_uio_drv"
+#define DRIVER_VERSION "0.1"
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
+
+#define TC_DEFAULT 64
+#define DEFAULT_BUFSIZE 1536
+#define STMMAC_RX_COPYBREAK 256
+
+static int buf_sz = DEFAULT_BUFSIZE;
+static int tc = TC_DEFAULT;
+
+/**
+ * rockchip_gmac_uio_pdev_info
+ * local information for uio module driver
+ *
+ * @dev: device pointer
+ * @ndev: network device pointer
+ * @name: uio name
+ * @uio: uio information
+ * @map_num: number of uio memory regions
+ */
+struct rockchip_gmac_uio_pdev_info {
+	struct device *dev;
+	struct net_device *ndev;
+	char name[16];
+	struct uio_info uio;
+	int map_num;
+};
+
+static int rockchip_gmac_uio_open(struct uio_info *info, struct inode *inode)
+{
+	return 0;
+}
+
+static int rockchip_gmac_uio_release(struct uio_info *info,
+				     struct inode *inode)
+{
+	return 0;
+}
+
+static int rockchip_gmac_uio_mmap(struct uio_info *info,
+				  struct vm_area_struct *vma)
+{
+	int ret;
+	u32 pfn;
+
+	pfn = (info->mem[vma->vm_pgoff].addr) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret) {
+		/* Error handling */
+		pr_err("remap_pfn_range failed\n");
+	}
+	return ret;
+}
+
+/**
+ * uio_free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_RX_SIZE * sizeof(struct dma_desc),
+					  rx_q->dma_rx, rx_q->dma_rx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_RX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  rx_q->dma_erx, rx_q->dma_rx_phy);
+	}
+}
+
+/**
+ * uio_free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_TX_SIZE * sizeof(struct dma_desc),
+					  tx_q->dma_tx, tx_q->dma_tx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_TX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  tx_q->dma_etx, tx_q->dma_tx_phy);
+	}
+}
+
+/**
+ * uio_alloc_dma_rx_desc_resources - alloc RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+static int uio_alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		rx_q->queue_index = queue;
+		rx_q->priv_data = priv;
+
+		if (priv->extend_desc) {
+			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+							    DMA_RX_SIZE *
+							    sizeof(struct
+							    dma_extended_desc),
+							    &rx_q->dma_rx_phy,
+							    GFP_KERNEL);
+			if (!rx_q->dma_erx)
+				goto err_dma;
+
+		} else {
+			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+							   DMA_RX_SIZE *
+							   sizeof(struct
+							   dma_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
+			if (!rx_q->dma_rx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_rx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+static int uio_alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		tx_q->queue_index = queue;
+		tx_q->priv_data = priv;
+
+		if (priv->extend_desc) {
+			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+							    DMA_TX_SIZE *
+							    sizeof(struct
+							    dma_extended_desc),
+							    &tx_q->dma_tx_phy,
+							    GFP_KERNEL);
+			if (!tx_q->dma_etx)
+				goto err_dma;
+		} else {
+			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+							   DMA_TX_SIZE *
+							   sizeof(struct
+							   dma_desc),
+							   &tx_q->dma_tx_phy,
+							   GFP_KERNEL);
+			if (!tx_q->dma_tx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+static int uio_alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* RX Allocation */
+	int ret = uio_alloc_dma_rx_desc_resources(priv);
+
+	if (ret)
+		return ret;
+
+	ret = uio_alloc_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the DMA RX socket buffers */
+	uio_free_dma_rx_desc_resources(priv);
+
+	/* Release the DMA TX socket buffers */
+	uio_free_dma_tx_desc_resources(priv);
+}
+
+/**
+ * uio_hw_fix_mac_speed - callback for speed selection
+ * @priv: driver private structure
+ * Description: on some platforms (e.g. ST), some HW system configuration
+ * registers have to be set according to the link speed negotiated.
+ */
+static inline void uio_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+	struct net_device *ndev = priv->dev;
+	struct phy_device *phydev = ndev->phydev;
+
+	if (likely(priv->plat->fix_mac_speed))
+		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
+}
+
+/**
+ * uio_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void uio_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+			 priv->pause, tx_cnt);
+}
+
+/**
+ * uio_adjust_link - adjusts the link parameters
+ * @dev: net device structure
+ * Description: this is the helper called by the physical abstraction layer
+ * drivers to communicate the phy link status. According the speed and duplex
+ * this driver can invoke registered glue-logic as well.
+ * It also invoke the eee initialization because it could happen when switch
+ * on different networks (that are eee capable).
+ */
+static void uio_adjust_link(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
+	bool new_state = false;
+
+	if (!phydev)
+		return;
+
+	mutex_lock(&priv->lock);
+
+	if (phydev->link) {
+		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+
+		/* Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode.
+		 */
+		if (phydev->duplex != priv->oldduplex) {
+			new_state = true;
+			if (!phydev->duplex)
+				ctrl &= ~priv->hw->link.duplex;
+			else
+				ctrl |= priv->hw->link.duplex;
+			priv->oldduplex = phydev->duplex;
+		}
+		/* Flow Control operation */
+		if (phydev->pause)
+			uio_mac_flow_ctrl(priv, phydev->duplex);
+
+		if (phydev->speed != priv->speed) {
+			new_state = true;
+			ctrl &= ~priv->hw->link.speed_mask;
+			switch (phydev->speed) {
+			case SPEED_1000:
+				ctrl |= priv->hw->link.speed1000;
+				break;
+			case SPEED_100:
+				ctrl |= priv->hw->link.speed100;
+				break;
+			case SPEED_10:
+				ctrl |= priv->hw->link.speed10;
+				break;
+			default:
+				netif_warn(priv, link, priv->dev,
+					   "broken speed: %d\n", phydev->speed);
+				phydev->speed = SPEED_UNKNOWN;
+				break;
+			}
+			if (phydev->speed != SPEED_UNKNOWN)
+				uio_hw_fix_mac_speed(priv);
+			priv->speed = phydev->speed;
+		}
+
+		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+		if (!priv->oldlink) {
+			new_state = true;
+			priv->oldlink = true;
+		}
+	} else if (priv->oldlink) {
+		new_state = true;
+		priv->oldlink = false;
+		priv->speed = SPEED_UNKNOWN;
+		priv->oldduplex = DUPLEX_UNKNOWN;
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+
+	mutex_unlock(&priv->lock);
+
+	if (phydev->is_pseudo_fixed_link)
+		/* Stop PHY layer to call the hook to adjust the link in case
+		 * of a switch is attached to the stmmac driver.
+		 */
+		phydev->irq = PHY_IGNORE_INTERRUPT;
+}
+
+/**
+ * rockchip_gmac_uio_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int rockchip_gmac_uio_init_phy(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	struct phy_device *phydev;
+	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+	int interface = priv->plat->interface;
+	int max_speed = priv->plat->max_speed;
+
+	priv->oldlink = false;
+	priv->speed = SPEED_UNKNOWN;
+	priv->oldduplex = DUPLEX_UNKNOWN;
+
+	if (priv->plat->integrated_phy_power)
+		priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
+
+	if (priv->mii)
+		stmmac_mdio_reset(priv->mii);
+
+	if (priv->plat->phy_node) {
+		phydev = of_phy_connect(dev, priv->plat->phy_node,
+					&uio_adjust_link, 0, interface);
+	} else {
+		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+			 priv->plat->bus_id);
+
+		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+			 priv->plat->phy_addr);
+		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
+			   phy_id_fmt);
+
+		phydev = phy_connect(dev, phy_id_fmt, &uio_adjust_link,
+				     interface);
+	}
+
+	if (IS_ERR_OR_NULL(phydev)) {
+		netdev_err(priv->dev, "Could not attach to PHY\n");
+		if (!phydev)
+			return -ENODEV;
+
+		return PTR_ERR(phydev);
+	}
+
+	/* Stop Advertising 1000BASE Capability if interface is not GMII */
+	if (interface == PHY_INTERFACE_MODE_MII ||
+	    interface == PHY_INTERFACE_MODE_RMII ||
+	    (max_speed < 1000 && max_speed > 0))
+		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+					 SUPPORTED_1000baseT_Full);
+
+	/* Half-duplex mode not supported with multiqueue
+	 * half-duplex can only works with single queue
+	 */
+	if (tx_cnt > 1)
+		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+				       SUPPORTED_100baseT_Half |
+				       SUPPORTED_10baseT_Half);
+
+	/* Broken HW is sometimes missing the pull-up resistor on the
+	 * MDIO line, which results in reads to non-existent devices returning
+	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+	 * device as well.
+	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
+	 */
+	if (!priv->plat->phy_node && phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+
+	/* uio_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+	 * subsequent PHY polling, make sure we force a link transition if
+	 * we have a UP/DOWN/UP transition
+	 */
+	if (phydev->is_pseudo_fixed_link)
+		phydev->irq = PHY_POLL;
+
+	phy_attached_info(phydev);
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * in case of these are not passed a default is kept for the MAC or GMAC.
+ */
+static int rockchip_gmac_uio_init_dma_engine(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	struct stmmac_rx_queue *rx_q;
+	struct stmmac_tx_queue *tx_q;
+	u32 chan = 0;
+	int atds = 0;
+	int ret = 0;
+
+	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+		dev_err(priv->device, "Invalid DMA configuration\n");
+		return -EINVAL;
+	}
+
+	if (priv->extend_desc && priv->mode == STMMAC_RING_MODE)
+		atds = 1;
+
+	ret = stmmac_reset(priv, priv->ioaddr);
+	if (ret) {
+		dev_err(priv->device, "Failed to reset the dma\n");
+		return ret;
+	}
+
+	/* DMA Configuration */
+	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+	if (priv->plat->axi)
+		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+	/* DMA CSR Channel configuration */
+	for (chan = 0; chan < dma_csr_ch; chan++)
+		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+	/* DMA RX Channel Configuration */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		rx_q = &priv->rx_queue[chan];
+
+		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    rx_q->dma_rx_phy, chan);
+
+		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+				     (DMA_RX_SIZE * sizeof(struct dma_desc));
+		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+				       rx_q->rx_tail_addr, chan);
+	}
+
+	/* DMA TX Channel Configuration */
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		tx_q = &priv->tx_queue[chan];
+
+		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    tx_q->dma_tx_phy, chan);
+
+		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+				       tx_q->tx_tail_addr, chan);
+	}
+
+	return ret;
+}
+
+static void uio_set_rings_length(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
+	/* set TX ring length */
+	for (chan = 0; chan < tx_channels_count; chan++)
+		stmmac_set_tx_ring_len(priv, priv->ioaddr,
+				       (DMA_TX_SIZE - 1), chan);
+
+	/* set RX ring length */
+	for (chan = 0; chan < rx_channels_count; chan++)
+		stmmac_set_rx_ring_len(priv, priv->ioaddr,
+				       (DMA_RX_SIZE - 1), chan);
+}
+
+/**
+ * uio_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting TX queues weight
+ */
+static void uio_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 weight;
+	u32 queue;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		weight = priv->plat->tx_queues_cfg[queue].weight;
+		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+	}
+}
+
+/**
+ * uio_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void uio_configure_cbs(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 mode_to_use;
+	u32 queue;
+
+	/* queue 0 is reserved for legacy traffic */
+	for (queue = 1; queue < tx_queues_count; queue++) {
+		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+		if (mode_to_use == MTL_QUEUE_DCB)
+			continue;
+
+		stmmac_config_cbs(priv, priv->hw,
+				  priv->plat->tx_queues_cfg[queue].send_slope,
+				  priv->plat->tx_queues_cfg[queue].idle_slope,
+				  priv->plat->tx_queues_cfg[queue].high_credit,
+				  priv->plat->tx_queues_cfg[queue].low_credit,
+				  queue);
+	}
+}
+
+/**
+ * uio_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void uio_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 chan;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		chan = priv->plat->rx_queues_cfg[queue].chan;
+		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void uio_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		if (!priv->plat->rx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->rx_queues_cfg[queue].prio;
+		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void uio_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		if (!priv->plat->tx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->tx_queues_cfg[queue].prio;
+		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void uio_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u8 packet;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		/* no specific packet type routing specified for the queue */
+		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+			continue;
+
+		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+	}
+}
+
+/**
+ * uio_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void uio_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	int queue;
+	u8 mode;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void rockchip_gmac_uio_mtl_configuration(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+	if (tx_queues_count > 1)
+		uio_set_tx_queue_weight(priv);
+
+	/* Configure MTL RX algorithms */
+	if (rx_queues_count > 1)
+		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+					      priv->plat->rx_sched_algorithm);
+
+	/* Configure MTL TX algorithms */
+	if (tx_queues_count > 1)
+		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+					      priv->plat->tx_sched_algorithm);
+
+	/* Configure CBS in AVB TX queues */
+	if (tx_queues_count > 1)
+		uio_configure_cbs(priv);
+
+	/* Map RX MTL to DMA channels */
+	uio_rx_queue_dma_chan_map(priv);
+
+	/* Enable MAC RX Queues */
+	uio_mac_enable_rx_queues(priv);
+
+	/* Set RX priorities */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_prio(priv);
+
+	/* Set TX priorities */
+	if (tx_queues_count > 1)
+		uio_mac_config_tx_queues_prio(priv);
+
+	/* Set RX routing */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_routing(priv);
+}
+
+static void uio_safety_feat_configuration(struct stmmac_priv *priv)
+{
+	if (priv->dma_cap.asp) {
+		netdev_info(priv->dev, "Enabling Safety Features\n");
+		stmmac_safety_feat_config(priv, priv->ioaddr,
+					  priv->dma_cap.asp);
+	} else {
+		netdev_info(priv->dev, "No Safety Features support found\n");
+	}
+}
+
+/**
+ * uio_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void uio_dma_operation_mode(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	int rxfifosz = priv->plat->rx_fifo_size;
+	int txfifosz = priv->plat->tx_fifo_size;
+	u32 txmode = 0;
+	u32 rxmode = 0;
+	u32 chan = 0;
+	u8 qmode = 0;
+
+	if (rxfifosz == 0)
+		rxfifosz = priv->dma_cap.rx_fifo_size;
+	if (txfifosz == 0)
+		txfifosz = priv->dma_cap.tx_fifo_size;
+
+	/* Adjust for real per queue fifo size */
+	rxfifosz /= rx_channels_count;
+	txfifosz /= tx_channels_count;
+
+	if (priv->plat->force_thresh_dma_mode) {
+		txmode = tc;
+		rxmode = tc;
+	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+		/* In case of GMAC, SF mode can be enabled
+		 * to perform the TX COE in HW. This depends on:
+		 * 1) TX COE if actually supported
+		 * 2) There is no bugged Jumbo frame support
+		 *    that needs to not insert csum in the TDES.
+		 */
+		txmode = SF_DMA_MODE;
+		rxmode = SF_DMA_MODE;
+		priv->xstats.threshold = SF_DMA_MODE;
+	} else {
+		txmode = tc;
+		rxmode = SF_DMA_MODE;
+	}
+
+	/* configure all channels */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+				   rxfifosz, qmode);
+		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+				      chan);
+	}
+
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+				   txfifosz, qmode);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_mmc_setup: setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq, in fact, the counters are managed in SW.
+ */
+static void rockchip_gmac_uio_mmc_setup(struct stmmac_priv *priv)
+{
+	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+	dwmac_mmc_intr_all_mask(priv->mmcaddr);
+
+	if (priv->dma_cap.rmon) {
+		dwmac_mmc_ctrl(priv->mmcaddr, mode);
+		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+	} else {
+		netdev_info(priv->dev, "No MAC Management Counters available\n");
+	}
+}
+
+/**
+ * rockchip_gmac_uio_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
+ * Description:
+ * this is the main function to setup the HW in a usable state because the
+ * dma engine is reset, the core registers are configured (e.g. AXI,
+ * Checksum features, timers). The DMA is ready to start receiving and
+ * transmitting.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int rockchip_gmac_uio_hw_setup(struct net_device *dev, bool init_ptp)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/* DMA initialization and SW reset */
+	ret = rockchip_gmac_uio_init_dma_engine(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+			   __func__);
+		return ret;
+	}
+
+	/* Copy the MAC addr into the HW */
+	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+	/* PS and related bits will be programmed according to the speed */
+	if (priv->hw->pcs) {
+		int speed = priv->plat->mac_port_sel_speed;
+
+		if (speed == SPEED_10 || speed == SPEED_100 ||
+		    speed == SPEED_1000) {
+			priv->hw->ps = speed;
+		} else {
+			dev_warn(priv->device, "invalid port speed\n");
+			priv->hw->ps = 0;
+		}
+	}
+
+	/* Initialize the MAC Core */
+	stmmac_core_init(priv, priv->hw, dev);
+
+	/* Initialize MTL*/
+	rockchip_gmac_uio_mtl_configuration(priv);
+
+	/* Initialize Safety Features */
+	uio_safety_feat_configuration(priv);
+
+	ret = stmmac_rx_ipc(priv, priv->hw);
+	if (!ret) {
+		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+		priv->hw->rx_csum = 0;
+	}
+
+	/* Enable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, true);
+
+	/* Set the HW DMA mode and the COE */
+	uio_dma_operation_mode(priv);
+
+	rockchip_gmac_uio_mmc_setup(priv);
+
+	if (priv->hw->pcs)
+		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+
+	/* set TX and RX rings length */
+	uio_set_rings_length(priv);
+
+	return ret;
+}
+
+/**
+ * uio_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int uio_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI) {
+		ret = rockchip_gmac_uio_init_phy(dev);
+		if (ret) {
+			netdev_err(priv->dev,
+				   "%s: Cannot attach to PHY (error: %d)\n",
+				   __func__, ret);
+			return ret;
+		}
+	}
+
+	/* Extra statistics */
+	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+	priv->xstats.threshold = tc;
+
+	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+	ret = uio_alloc_dma_desc_resources(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+			   __func__);
+		goto dma_desc_error;
+	}
+
+	ret = rockchip_gmac_uio_hw_setup(dev, true);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+		goto init_error;
+	}
+
+	if (dev->phydev)
+		phy_start(dev->phydev);
+
+	return 0;
+
+init_error:
+	uio_free_dma_desc_resources(priv);
+dma_desc_error:
+	if (dev->phydev)
+		phy_disconnect(dev->phydev);
+	return ret;
+}
+
+/**
+ * uio_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int uio_release(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Stop and disconnect the PHY */
+	if (dev->phydev) {
+		phy_stop(dev->phydev);
+		phy_disconnect(dev->phydev);
+		if (priv->plat->integrated_phy_power)
+			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
+							 false);
+	}
+
+	/* Release and free the Rx/Tx resources */
+	uio_free_dma_desc_resources(priv);
+
+	/* Disable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, false);
+
+	netif_carrier_off(dev);
+
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_probe() - platform driver probe routine
+ * - register uio devices filled with memory maps retrieved
+ * from device tree
+ */
+static int rockchip_gmac_uio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node, *mac_node;
+	struct rockchip_gmac_uio_pdev_info *pdev_info;
+	struct net_device *netdev;
+	struct stmmac_priv *priv;
+	struct uio_info *uio;
+	struct resource *res;
+	int err = 0;
+
+	pdev_info = devm_kzalloc(dev, sizeof(struct rockchip_gmac_uio_pdev_info),
+				 GFP_KERNEL);
+	if (!pdev_info)
+		return -ENOMEM;
+
+	uio = &pdev_info->uio;
+	pdev_info->dev = dev;
+	mac_node = of_parse_phandle(np, "rockchip,ethernet", 0);
+	if (!mac_node)
+		return -ENODEV;
+
+	if (of_device_is_available(mac_node)) {
+		netdev = of_find_net_device_by_node(mac_node);
+		of_node_put(mac_node);
+		if (!netdev)
+			return -ENODEV;
+	} else {
+		of_node_put(mac_node);
+		return -EINVAL;
+	}
+
+	pdev_info->ndev = netdev;
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
+
+	rtnl_lock();
+	err = uio_open(netdev);
+	if (err) {
+		rtnl_unlock();
+		dev_err(dev, "Failed to open stmmac resource: %d\n", err);
+		return err;
+	}
+	rtnl_unlock();
+
+	priv = netdev_priv(netdev);
+	snprintf(pdev_info->name, sizeof(pdev_info->name), "uio_%s",
+		 netdev->name);
+	uio->name = pdev_info->name;
+	uio->version = DRIVER_VERSION;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	uio->mem[0].name = "eth_regs";
+	uio->mem[0].addr = res->start & PAGE_MASK;
+	uio->mem[0].size = PAGE_ALIGN(resource_size(res));
+	uio->mem[0].memtype = UIO_MEM_PHYS;
+
+	uio->mem[1].name = "eth_rx_bd";
+	uio->mem[1].addr = priv->rx_queue[0].dma_rx_phy;
+	uio->mem[1].size = DMA_RX_SIZE * sizeof(struct dma_desc);
+	uio->mem[1].memtype = UIO_MEM_PHYS;
+
+	uio->mem[2].name = "eth_tx_bd";
+	uio->mem[2].addr = priv->tx_queue[0].dma_tx_phy;
+	uio->mem[2].size = DMA_TX_SIZE * sizeof(struct dma_desc);
+	uio->mem[2].memtype = UIO_MEM_PHYS;
+
+	uio->open = rockchip_gmac_uio_open;
+	uio->release = rockchip_gmac_uio_release;
+	/* Custom mmap function. */
+	uio->mmap = rockchip_gmac_uio_mmap;
+	uio->priv = pdev_info;
+
+	err = uio_register_device(dev, uio);
+	if (err) {
+		dev_err(dev, "Failed to register uio device: %d\n", err);
+		return err;
+	}
+
+	pdev_info->map_num = 3;
+
+	dev_info(dev, "Registered %s uio devices, %d register maps attached\n",
+		 pdev_info->name, pdev_info->map_num);
+
+	platform_set_drvdata(pdev, pdev_info);
+
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_remove() - ROCKCHIP ETH UIO platform driver release
+ * routine - unregister uio devices
+ */
+static int rockchip_gmac_uio_remove(struct platform_device *pdev)
+{
+	struct rockchip_gmac_uio_pdev_info *pdev_info =
+		platform_get_drvdata(pdev);
+	struct net_device *netdev;
+
+	if (!pdev_info)
+		return -EINVAL;
+
+	netdev = pdev_info->ndev;
+
+	uio_unregister_device(&pdev_info->uio);
+
+	if (netdev) {
+		rtnl_lock();
+		uio_release(netdev);
+		rtnl_unlock();
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (netdev) {
+		rtnl_lock();
+		dev_open(netdev);
+		rtnl_unlock();
+	}
+
+	return 0;
+}
+
+static const struct of_device_id rockchip_gmac_uio_of_match[] = {
+	{ .compatible = "rockchip,uio-gmac", },
+	{ }
+};
+
+static struct platform_driver rockchip_gmac_uio_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+		.of_match_table = rockchip_gmac_uio_of_match,
+	},
+	.probe = rockchip_gmac_uio_probe,
+	.remove = rockchip_gmac_uio_remove,
+};
+
+module_platform_driver(rockchip_gmac_uio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ROCKCHIP");
+MODULE_DESCRIPTION("ROCKCHIP GMAC UIO Driver");
+
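
Usage note (not part of the applied patch): the probe routine binds to a "rockchip,uio-gmac" node, takes its first memory resource as the GMAC register window, and resolves the actual stmmac controller through the "rockchip,ethernet" phandle. A minimal device tree sketch of such a node is shown below; the node name, unit address, register size and the &gmac1 label are illustrative assumptions for a board DTS, not values defined by this patch:

	gmac_uio: gmac-uio@fe010000 {
		compatible = "rockchip,uio-gmac";
		/* GMAC register window, exported as UIO map 0 ("eth_regs");
		 * address and size are placeholders for the target SoC.
		 */
		reg = <0x0 0xfe010000 0x0 0x10000>;
		/* phandle to the stmmac controller node handled by dwmac-rk */
		rockchip,ethernet = <&gmac1>;
	};

Userspace (e.g. a DPDK PMD) would then mmap() maps 0..2 of the resulting /dev/uioX device (register window, RX descriptor ring, TX descriptor ring) using the standard UIO convention of mmap offset = map index * page size.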