From 23f2752638f0ec22fb848b46b2d16d65bd98b49d Mon Sep 17 00:00:00 2001
From: David Wu <david.wu@rock-chips.com>
Date: Thu, 03 Nov 2022 10:47:48 +0800
Subject: [PATCH] net: ethernet: stmmac: Add uio support for stmmac

Currently only a single DMA channel is supported, and the network
interface must be named eth0 or eth1.
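The uio platform device binds to its own device tree node and finds the
MAC instance through a "rockchip,ethernet" phandle. A minimal node could
look as follows (the label and the &gmac0 target are illustrative; only
the compatible string and the property name come from this driver):

	gmac_uio: gmac-uio {
		compatible = "rockchip,uio-gmac";
		rockchip,ethernet = <&gmac0>;
	};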
Signed-off-by: David Wu <david.wu@rock-chips.com>
Change-Id: I19975b10e2ed12931edc2e8bd50c003416a1109c
---
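Usage note: the module exposes three UIO maps (eth_regs, eth_rx_bd and
eth_tx_bd). Following the usual UIO convention that map N is selected by
an mmap offset of N * page size, a userspace consumer could map them
roughly as sketched below; the device node /dev/uio0 and the one-page
map lengths are assumptions for illustration, the real sizes are
published under /sys/class/uio/uio0/maps/.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/uio0", O_RDWR);	/* assumed device node */
	void *map[3];
	int i;

	if (fd < 0)
		return 1;

	/* map 0: MAC registers, map 1: RX ring, map 2: TX ring */
	for (i = 0; i < 3; i++) {
		map[i] = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, (off_t)i * pg);
		if (map[i] == MAP_FAILED)
			return 1;
	}

	printf("regs=%p rxbd=%p txbd=%p\n", map[0], map[1], map[2]);
	close(fd);
	return 0;
}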
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 38039c9..c19cdd6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -52,6 +52,13 @@
 
 	  If unsure, say N.
 
+config STMMAC_UIO
+	tristate "STMMAC_UIO ethernet controller"
+	default n
+	select UIO
+	help
+	  Say M here if you want to use stmmac_uio.ko with DPDK.
+
 if STMMAC_PLATFORM
 
 config DWMAC_DWC_QOS_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 05c792e..d017cbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_DWMAC_ROCKCHIP)	+= dwmac-rockchip.o
 dwmac-rockchip-objs := dwmac-rk.o
 dwmac-rockchip-$(CONFIG_DWMAC_ROCKCHIP_TOOL) += dwmac-rk-tool.o
+obj-$(CONFIG_STMMAC_UIO)	+= stmmac_uio.o
 obj-$(CONFIG_DWMAC_SOCFPGA)	+= dwmac-altr-socfpga.o
 obj-$(CONFIG_DWMAC_STI)		+= dwmac-sti.o
 obj-$(CONFIG_DWMAC_STM32)	+= dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index df7de50..37658d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -48,10 +48,18 @@
  */
 #define DMA_MIN_TX_SIZE	64
 #define DMA_MAX_TX_SIZE	1024
+#if IS_ENABLED(CONFIG_STMMAC_UIO)
+#define DMA_DEFAULT_TX_SIZE	1024
+#else
 #define DMA_DEFAULT_TX_SIZE	512
+#endif
 #define DMA_MIN_RX_SIZE	64
 #define DMA_MAX_RX_SIZE	1024
+#if IS_ENABLED(CONFIG_STMMAC_UIO)
+#define DMA_DEFAULT_RX_SIZE	1024
+#else
 #define DMA_DEFAULT_RX_SIZE	512
+#endif
 #define STMMAC_GET_ENTRY(x, size)	((x + 1) & (size - 1))
 
 #undef FRAME_FILTER_DEBUG
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 7c1a14b..8a29921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -396,6 +396,7 @@
 #endif
 	return 0;
 }
+EXPORT_SYMBOL(stmmac_mdio_reset);
 
 /**
  * stmmac_mdio_register
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
new file mode 100644
index 0000000..11ec4b7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 ROCKCHIP
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/uio_driver.h>
+#include <linux/list.h>
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/udp.h>
+#include <net/pkt_cls.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+#include "mmc.h"
+
+#define DRIVER_NAME "rockchip_gmac_uio_drv"
+#define DRIVER_VERSION "0.1"
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
+
+#define STMMAC_RX_COPYBREAK 256
+
+/**
+ * struct rockchip_gmac_uio_pdev_info - local information for the uio
+ * module driver
+ * @dev: device pointer
+ * @ndev: network device pointer
+ * @name: uio name
+ * @uio: uio information
+ * @map_num: number of uio memory regions
+ */
+struct rockchip_gmac_uio_pdev_info {
+	struct device *dev;
+	struct net_device *ndev;
+	char name[16];
+	struct uio_info uio;
+	int map_num;
+};
+
+static int rockchip_gmac_uio_open(struct uio_info *info, struct inode *inode)
+{
+	return 0;
+}
+
+static int rockchip_gmac_uio_release(struct uio_info *info,
+				     struct inode *inode)
+{
+	return 0;
+}
+
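+/*
+ * Custom mmap: vma->vm_pgoff carries the UIO map index. Map 0 is the
+ * MAC register window and is mapped with device attributes; maps 1/2
+ * are the RX/TX descriptor rings and are mapped non-cached.
+ */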
+static int rockchip_gmac_uio_mmap(struct uio_info *info,
+				  struct vm_area_struct *vma)
+{
+	u32 pfn;
+	int ret;
+
+	pfn = (info->mem[vma->vm_pgoff].addr) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		pr_err("remap_pfn_range failed\n");
+
+	return ret;
+}
+
+/**
+ * uio_free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
+					  rx_q->dma_rx, rx_q->dma_rx_phy);
+		else
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_extended_desc),
+					  rx_q->dma_erx, rx_q->dma_rx_phy);
+	}
+}
+
+/**
+ * uio_free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		size_t size;
+		void *addr;
+
+		if (priv->extend_desc) {
+			size = sizeof(struct dma_extended_desc);
+			addr = tx_q->dma_etx;
+		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+			size = sizeof(struct dma_edesc);
+			addr = tx_q->dma_entx;
+		} else {
+			size = sizeof(struct dma_desc);
+			addr = tx_q->dma_tx;
+		}
+
+		size *= priv->dma_tx_size;
+
+		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+	}
+}
+
+/**
+ * uio_alloc_dma_rx_desc_resources - alloc RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the RX descriptor rings in coherent memory.
+ */
+static int uio_alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		if (priv->extend_desc) {
+			rx_q->dma_erx = dma_alloc_coherent(priv->device,
+							   priv->dma_rx_size *
+							   sizeof(struct dma_extended_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
+			if (!rx_q->dma_erx)
+				goto err_dma;
+		} else {
+			rx_q->dma_rx = dma_alloc_coherent(priv->device,
+							  priv->dma_rx_size *
+							  sizeof(struct dma_desc),
+							  &rx_q->dma_rx_phy,
+							  GFP_KERNEL);
+			if (!rx_q->dma_rx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_rx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the TX descriptor rings in coherent memory.
+ */
+static int uio_alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		size_t size;
+		void *addr;
+
+		tx_q->queue_index = queue;
+		tx_q->priv_data = priv;
+
+		if (priv->extend_desc)
+			size = sizeof(struct dma_extended_desc);
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			size = sizeof(struct dma_edesc);
+		else
+			size = sizeof(struct dma_desc);
+
+		size *= priv->dma_tx_size;
+
+		addr = dma_alloc_coherent(priv->device, size,
+					  &tx_q->dma_tx_phy, GFP_KERNEL);
+		if (!addr)
+			goto err_dma;
+
+		if (priv->extend_desc)
+			tx_q->dma_etx = addr;
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			tx_q->dma_entx = addr;
+		else
+			tx_q->dma_tx = addr;
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_tx_desc_resources(priv);
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the descriptor ring resources for the TX and
+ * RX paths.
+ */
+static int uio_alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* RX Allocation */
+	int ret = uio_alloc_dma_rx_desc_resources(priv);
+
+	if (ret)
+		return ret;
+
+	ret = uio_alloc_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the DMA RX descriptor rings */
+	uio_free_dma_rx_desc_resources(priv);
+
+	/* Release the DMA TX descriptor rings */
+	uio_free_dma_tx_desc_resources(priv);
+}
+
+/**
+ * rockchip_gmac_uio_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state and attaches the PHY
+ * to the MAC driver.
+ * Return value:
+ * 0 on success
+ */
+static int rockchip_gmac_uio_init_phy(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct device_node *node;
+	int ret;
+
+	node = priv->plat->phylink_node;
+
+	if (node)
+		ret = phylink_of_phy_connect(priv->phylink, node, 0);
+
+	/* Some DT bindings do not set up the PHY handle. Let's try to
+	 * manually parse it
+	 */
+	if (!node || ret) {
+		int addr = priv->plat->phy_addr;
+		struct phy_device *phydev;
+
+		phydev = mdiobus_get_phy(priv->mii, addr);
+		if (!phydev) {
+			netdev_err(priv->dev, "no phy at addr %d\n", addr);
+			return -ENODEV;
+		}
+
+		ret = phylink_connect_phy(priv->phylink, phydev);
+	}
+
+	if (!priv->plat->pmt) {
+		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+		phylink_ethtool_get_wol(priv->phylink, &wol);
+		device_set_wakeup_capable(priv->device, !!wol.supported);
+	}
+
+	return ret;
+}
+
+/**
+ * rockchip_gmac_uio_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * if these are not passed, a default is used for the MAC or GMAC.
+ */
+static int rockchip_gmac_uio_init_dma_engine(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	struct stmmac_rx_queue *rx_q;
+	struct stmmac_tx_queue *tx_q;
+	u32 chan = 0;
+	int atds = 0;
+	int ret = 0;
+
+	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+		dev_err(priv->device, "Invalid DMA configuration\n");
+		return -EINVAL;
+	}
+
+	if (priv->extend_desc && priv->mode == STMMAC_RING_MODE)
+		atds = 1;
+
+	ret = stmmac_reset(priv, priv->ioaddr);
+	if (ret) {
+		dev_err(priv->device, "Failed to reset the dma\n");
+		return ret;
+	}
+
+	/* DMA Configuration */
+	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+	if (priv->plat->axi)
+		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+	/* DMA CSR Channel configuration */
+	for (chan = 0; chan < dma_csr_ch; chan++)
+		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+	/* DMA RX Channel Configuration */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		rx_q = &priv->rx_queue[chan];
+
+		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    rx_q->dma_rx_phy, chan);
+
+		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+				     (priv->dma_rx_size *
+				      sizeof(struct dma_desc));
+		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+				       rx_q->rx_tail_addr, chan);
+	}
+
+	/* DMA TX Channel Configuration */
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		tx_q = &priv->tx_queue[chan];
+
+		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    tx_q->dma_tx_phy, chan);
+
+		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+				       tx_q->tx_tail_addr, chan);
+	}
+
+	return ret;
+}
+
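+/**
+ * uio_set_rings_length - program the TX/RX ring lengths per DMA channel
+ * @priv: driver private structure
+ */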
+static void uio_set_rings_length(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
+	/* set TX ring length */
+	for (chan = 0; chan < tx_channels_count; chan++)
+		stmmac_set_tx_ring_len(priv, priv->ioaddr,
+				       (priv->dma_tx_size - 1), chan);
+
+	/* set RX ring length */
+	for (chan = 0; chan < rx_channels_count; chan++)
+		stmmac_set_rx_ring_len(priv, priv->ioaddr,
+				       (priv->dma_rx_size - 1), chan);
+}
+
+/**
+ * uio_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting TX queue weights
+ */
+static void uio_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 weight;
+	u32 queue;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		weight = priv->plat->tx_queues_cfg[queue].weight;
+		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+	}
+}
+
+/**
+ * uio_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void uio_configure_cbs(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 mode_to_use;
+	u32 queue;
+
+	/* queue 0 is reserved for legacy traffic */
+	for (queue = 1; queue < tx_queues_count; queue++) {
+		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+		if (mode_to_use == MTL_QUEUE_DCB)
+			continue;
+
+		stmmac_config_cbs(priv, priv->hw,
+				  priv->plat->tx_queues_cfg[queue].send_slope,
+				  priv->plat->tx_queues_cfg[queue].idle_slope,
+				  priv->plat->tx_queues_cfg[queue].high_credit,
+				  priv->plat->tx_queues_cfg[queue].low_credit,
+				  queue);
+	}
+}
+
+/**
+ * uio_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void uio_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 chan;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		chan = priv->plat->rx_queues_cfg[queue].chan;
+		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void uio_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		if (!priv->plat->rx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->rx_queues_cfg[queue].prio;
+		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void uio_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		if (!priv->plat->tx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->tx_queues_cfg[queue].prio;
+		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void uio_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u8 packet;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		/* no specific packet type routing specified for the queue */
+		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+			continue;
+
+		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+	}
+}
+
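+/**
+ * uio_mac_config_rss - enable RSS when supported by the HW and
+ * requested by the platform/netdev features
+ * @priv: driver private structure
+ */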
+static void uio_mac_config_rss(struct stmmac_priv *priv)
+{
+	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+		priv->rss.enable = false;
+		return;
+	}
+
+	if (priv->dev->features & NETIF_F_RXHASH)
+		priv->rss.enable = true;
+	else
+		priv->rss.enable = false;
+
+	stmmac_rss_configure(priv, priv->hw, &priv->rss,
+			     priv->plat->rx_queues_to_use);
+}
+
+/**
+ * uio_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void uio_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	int queue;
+	u8 mode;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void rockchip_gmac_uio_mtl_configuration(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+	if (tx_queues_count > 1)
+		uio_set_tx_queue_weight(priv);
+
+	/* Configure MTL RX algorithms */
+	if (rx_queues_count > 1)
+		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+					      priv->plat->rx_sched_algorithm);
+
+	/* Configure MTL TX algorithms */
+	if (tx_queues_count > 1)
+		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+					      priv->plat->tx_sched_algorithm);
+
+	/* Configure CBS in AVB TX queues */
+	if (tx_queues_count > 1)
+		uio_configure_cbs(priv);
+
+	/* Map RX MTL to DMA channels */
+	uio_rx_queue_dma_chan_map(priv);
+
+	/* Enable MAC RX Queues */
+	uio_mac_enable_rx_queues(priv);
+
+	/* Set RX priorities */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_prio(priv);
+
+	/* Set TX priorities */
+	if (tx_queues_count > 1)
+		uio_mac_config_tx_queues_prio(priv);
+
+	/* Set RX routing */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_routing(priv);
+
+	/* Receive Side Scaling */
+	if (rx_queues_count > 1)
+		uio_mac_config_rss(priv);
+}
+
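+/**
+ * uio_safety_feat_configuration - enable the HW safety features
+ * @priv: driver private structure
+ * Description: enables the safety features when the ASP capability is
+ * reported in the DMA capabilities.
+ */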
+static void uio_safety_feat_configuration(struct stmmac_priv *priv)
+{
+	if (priv->dma_cap.asp) {
+		netdev_info(priv->dev, "Enabling Safety Features\n");
+		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
+	} else {
+		netdev_info(priv->dev, "No Safety Features support found\n");
+	}
+}
+
+/**
+ * uio_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void uio_dma_operation_mode(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	int rxfifosz = priv->plat->rx_fifo_size;
+	int txfifosz = priv->plat->tx_fifo_size;
+	u32 txmode = 0;
+	u32 rxmode = 0;
+	u32 chan = 0;
+	u8 qmode = 0;
+
+	if (rxfifosz == 0)
+		rxfifosz = priv->dma_cap.rx_fifo_size;
+	if (txfifosz == 0)
+		txfifosz = priv->dma_cap.tx_fifo_size;
+
+	/* Adjust for real per queue fifo size */
+	rxfifosz /= rx_channels_count;
+	txfifosz /= tx_channels_count;
+
+	if (priv->plat->force_thresh_dma_mode) {
+		txmode = tc;
+		rxmode = tc;
+	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+		/* In case of GMAC, SF mode can be enabled
+		 * to perform the TX COE in HW. This depends on:
+		 * 1) TX COE if actually supported
+		 * 2) There is no bugged Jumbo frame support
+		 *    that needs to not insert csum in the TDES.
+		 */
+		txmode = SF_DMA_MODE;
+		rxmode = SF_DMA_MODE;
+		priv->xstats.threshold = SF_DMA_MODE;
+	} else {
+		txmode = tc;
+		rxmode = SF_DMA_MODE;
+	}
+
+	/* configure all channels */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+				   rxfifosz, qmode);
+		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+				      chan);
+	}
+
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+				   txfifosz, qmode);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_hw_setup - set up the MAC in a usable state.
+ * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
+ * Description:
+ * this is the main function to set up the HW in a usable state: the DMA
+ * engine is reset and the core registers are configured (e.g. AXI,
+ * checksum features, timers). The DMA is then ready to start receiving
+ * and transmitting.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int rockchip_gmac_uio_hw_setup(struct net_device *dev, bool init_ptp)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/* DMA initialization and SW reset */
+	ret = rockchip_gmac_uio_init_dma_engine(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+			   __func__);
+		return ret;
+	}
+
+	/* Copy the MAC addr into the HW */
+	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+	/* PS and related bits will be programmed according to the speed */
+	if (priv->hw->pcs) {
+		int speed = priv->plat->mac_port_sel_speed;
+
+		if (speed == SPEED_10 || speed == SPEED_100 ||
+		    speed == SPEED_1000) {
+			priv->hw->ps = speed;
+		} else {
+			dev_warn(priv->device, "invalid port speed\n");
+			priv->hw->ps = 0;
+		}
+	}
+
+	/* Initialize the MAC Core */
+	stmmac_core_init(priv, priv->hw, dev);
+
+	/* Initialize MTL */
+	rockchip_gmac_uio_mtl_configuration(priv);
+
+	/* Initialize Safety Features */
+	uio_safety_feat_configuration(priv);
+
+	ret = stmmac_rx_ipc(priv, priv->hw);
+	if (!ret) {
+		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+		priv->hw->rx_csum = 0;
+	}
+
+	/* Enable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, true);
+
+	/* Set the HW DMA mode and the COE */
+	uio_dma_operation_mode(priv);
+
+	if (priv->hw->pcs)
+		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+
+	/* set TX and RX rings length */
+	uio_set_rings_length(priv);
+
+	return 0;
+}
+
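+/*
+ * Pick the DMA buffer size bucket (2K/4K/8K/16K) that fits the MTU,
+ * falling back to DEFAULT_BUFSIZE for standard frames.
+ */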
+static int uio_set_bfsize(int mtu, int bufsize)
+{
+	int ret = bufsize;
+
+	if (mtu >= BUF_SIZE_8KiB)
+		ret = BUF_SIZE_16KiB;
+	else if (mtu >= BUF_SIZE_4KiB)
+		ret = BUF_SIZE_8KiB;
+	else if (mtu >= BUF_SIZE_2KiB)
+		ret = BUF_SIZE_4KiB;
+	else if (mtu > DEFAULT_BUFSIZE)
+		ret = BUF_SIZE_2KiB;
+	else
+		ret = DEFAULT_BUFSIZE;
+
+	return ret;
+}
+
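+/*
+ * Note: unlike stmmac_open(), no IRQs are requested and no NAPI
+ * contexts are started here; the rings are only allocated and
+ * programmed into the DMA so that a userspace poll-mode driver
+ * can take over.
+ */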
+/**
+ * uio_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int uio_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int bfsize = 0;
+	int ret;
+
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI &&
+	    !priv->hw->xpcs) {
+		ret = rockchip_gmac_uio_init_phy(dev);
+		if (ret) {
+			netdev_err(priv->dev,
+				   "%s: Cannot attach to PHY (error: %d)\n",
+				   __func__, ret);
+			return ret;
+		}
+	}
+
+	/* Extra statistics */
+	priv->xstats.threshold = tc;
+
+	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+	if (bfsize < 0)
+		bfsize = 0;
+
+	if (bfsize < BUF_SIZE_16KiB)
+		bfsize = uio_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+	priv->dma_buf_sz = bfsize;
+	buf_sz = bfsize;
+
+	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+	if (!priv->dma_tx_size)
+		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+	if (!priv->dma_rx_size)
+		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+	ret = uio_alloc_dma_desc_resources(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+			   __func__);
+		goto dma_desc_error;
+	}
+
+	ret = rockchip_gmac_uio_hw_setup(dev, true);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+		goto init_error;
+	}
+
+	phylink_start(priv->phylink);
+	/* We may have called phylink_speed_down before */
+	phylink_speed_up(priv->phylink);
+
+	return 0;
+
+init_error:
+	uio_free_dma_desc_resources(priv);
+dma_desc_error:
+	phylink_disconnect_phy(priv->phylink);
+	return ret;
+}
+
+/**
+ * uio_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int uio_release(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Stop and disconnect the PHY */
+	if (dev->phydev) {
+		phy_stop(dev->phydev);
+		phy_disconnect(dev->phydev);
+		if (priv->plat->integrated_phy_power)
+			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
+							 false);
+	}
+
+	/* Release and free the Rx/Tx resources */
+	uio_free_dma_desc_resources(priv);
+
+	/* Disable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, false);
+
+	netif_carrier_off(dev);
+
+	return 0;
+}
+
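+/*
+ * Probe flow: resolve the MAC through the "rockchip,ethernet" phandle,
+ * close the kernel netdev, re-create the DMA rings via uio_open(), then
+ * export the register window and both descriptor rings as UIO maps.
+ */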
+/**
+ * rockchip_gmac_uio_probe() - platform driver probe routine
+ * - register uio devices filled with memory maps retrieved
+ *   from device tree
+ */
+static int rockchip_gmac_uio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node, *mac_node;
+	struct rockchip_gmac_uio_pdev_info *pdev_info;
+	struct net_device *netdev;
+	struct stmmac_priv *priv;
+	struct uio_info *uio;
+	struct resource *res;
+	int err = 0;
+
+	pdev_info = devm_kzalloc(dev, sizeof(struct rockchip_gmac_uio_pdev_info),
+				 GFP_KERNEL);
+	if (!pdev_info)
+		return -ENOMEM;
+
+	uio = &pdev_info->uio;
+	pdev_info->dev = dev;
+	mac_node = of_parse_phandle(np, "rockchip,ethernet", 0);
+	if (!mac_node)
+		return -ENODEV;
+
+	if (of_device_is_available(mac_node)) {
+		netdev = of_find_net_device_by_node(mac_node);
+		of_node_put(mac_node);
+		if (!netdev)
+			return -ENODEV;
+	} else {
+		of_node_put(mac_node);
+		return -EINVAL;
+	}
+
+	pdev_info->ndev = netdev;
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
+
+	rtnl_lock();
+	err = uio_open(netdev);
+	if (err) {
+		rtnl_unlock();
+		dev_err(dev, "Failed to open stmmac resource: %d\n", err);
+		return err;
+	}
+	rtnl_unlock();
+
+	priv = netdev_priv(netdev);
+	snprintf(pdev_info->name, sizeof(pdev_info->name), "uio_%s",
+		 netdev->name);
+	uio->name = pdev_info->name;
+	uio->version = DRIVER_VERSION;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	uio->mem[0].name = "eth_regs";
+	uio->mem[0].addr = res->start & PAGE_MASK;
+	uio->mem[0].size = PAGE_ALIGN(resource_size(res));
+	uio->mem[0].memtype = UIO_MEM_PHYS;
+
+	uio->mem[1].name = "eth_rx_bd";
+	uio->mem[1].addr = priv->rx_queue[0].dma_rx_phy;
+	uio->mem[1].size = priv->dma_rx_size * sizeof(struct dma_desc);
+	uio->mem[1].memtype = UIO_MEM_PHYS;
+
+	uio->mem[2].name = "eth_tx_bd";
+	uio->mem[2].addr = priv->tx_queue[0].dma_tx_phy;
+	uio->mem[2].size = priv->dma_tx_size * sizeof(struct dma_desc);
+	uio->mem[2].memtype = UIO_MEM_PHYS;
+
+	uio->open = rockchip_gmac_uio_open;
+	uio->release = rockchip_gmac_uio_release;
+	/* Custom mmap function. */
+	uio->mmap = rockchip_gmac_uio_mmap;
+	uio->priv = pdev_info;
+
+	err = uio_register_device(dev, uio);
+	if (err) {
+		dev_err(dev, "Failed to register uio device: %d\n", err);
+		return err;
+	}
+
+	pdev_info->map_num = 3;
+
+	dev_info(dev, "Registered %s uio devices, %d register maps attached\n",
+		 pdev_info->name, pdev_info->map_num);
+
+	platform_set_drvdata(pdev, pdev_info);
+
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_remove() - ROCKCHIP ETH UIO platform driver release
+ * routine - unregister uio devices
+ */
+static int rockchip_gmac_uio_remove(struct platform_device *pdev)
+{
+	struct rockchip_gmac_uio_pdev_info *pdev_info =
+		platform_get_drvdata(pdev);
+	struct net_device *netdev;
+
+	if (!pdev_info)
+		return -EINVAL;
+
+	netdev = pdev_info->ndev;
+
+	uio_unregister_device(&pdev_info->uio);
+
+	if (netdev) {
+		rtnl_lock();
+		uio_release(netdev);
+		rtnl_unlock();
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (netdev) {
+		rtnl_lock();
+		dev_open(netdev, NULL);
+		rtnl_unlock();
+	}
+
+	return 0;
+}
+
+static const struct of_device_id rockchip_gmac_uio_of_match[] = {
+	{ .compatible = "rockchip,uio-gmac", },
+	{ }
+};
+
+static struct platform_driver rockchip_gmac_uio_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+		.of_match_table = rockchip_gmac_uio_of_match,
+	},
+	.probe = rockchip_gmac_uio_probe,
+	.remove = rockchip_gmac_uio_remove,
+};
+
+module_platform_driver(rockchip_gmac_uio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ROCKCHIP");
+MODULE_DESCRIPTION("ROCKCHIP GMAC UIO Driver");