From 3881f7b32a846e44ae94ab1b066e2b081c05244e Mon Sep 17 00:00:00 2001
From: David Wu <david.wu@rock-chips.com>
Date: Thu, 03 Nov 2022 10:47:48 +0800
Subject: [PATCH] net: ethernet: stmmac: Add uio support for stmmac

Currently only a single channel is supported, and the network
interface names must be eth0 and eth1.

Signed-off-by: David Wu <david.wu@rock-chips.com>
Change-Id: I19975b10e2ed12931edc2e8bd50c003416a1109c
---

diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 21f4074..7d1ae95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,6 +12,13 @@
 
 if STMMAC_ETH
 
+config STMMAC_UIO
+	tristate "STMMAC_UIO ethernet controller"
+	default n
+	select UIO
+	help
+	  Say M here if you want to use stmmac_uio.ko for DPDK.
+
 config STMMAC_ETHTOOL
 	bool "Ethtool feature for STMMAC"
 	default STMMAC_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index f8275ed..82adb5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_DWMAC_ROCKCHIP)	+= dwmac-rockchip.o
 dwmac-rockchip-objs := dwmac-rk.o
 dwmac-rockchip-$(CONFIG_DWMAC_ROCKCHIP_TOOL) += dwmac-rk-tool.o
+obj-$(CONFIG_STMMAC_UIO)	+= stmmac_uio.o
 obj-$(CONFIG_DWMAC_SOCFPGA)	+= dwmac-altr-socfpga.o
 obj-$(CONFIG_DWMAC_STI)		+= dwmac-sti.o
 obj-$(CONFIG_DWMAC_STM32)	+= dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e6fa3b1..6bb4f4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -47,8 +47,13 @@
 #define STMMAC_CHAN0	0	/* Always supported and default for all chips */
 
 /* These need to be power of two, and >= 4 */
+#if IS_ENABLED(CONFIG_STMMAC_UIO)
+#define DMA_TX_SIZE 1024
+#define DMA_RX_SIZE 1024
+#else
 #define DMA_TX_SIZE 512
 #define DMA_RX_SIZE 512
+#endif
 #define STMMAC_GET_ENTRY(x, size)	((x + 1) & (size - 1))
 
 #undef FRAME_FILTER_DEBUG
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index e9b04c2..787f725 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -139,6 +139,7 @@
 	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
 		 MMC_CNTRL, value);
 }
+EXPORT_SYMBOL(dwmac_mmc_ctrl);
 
 /* To mask all all interrupts.*/
 void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
@@ -147,6 +148,7 @@
 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
 }
+EXPORT_SYMBOL(dwmac_mmc_intr_all_mask);
 
 /* This reads the MAC core counters (if actaully supported).
  * by default the MMC core is programmed to reset each
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 093a223..12a5f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -302,6 +302,7 @@
 #endif
 	return 0;
 }
+EXPORT_SYMBOL(stmmac_mdio_reset);
 
 /**
  * stmmac_mdio_register
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
new file mode 100644
index 0000000..b241bd9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_uio.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 ROCKCHIP
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/uio_driver.h>
+#include <linux/list.h>
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/udp.h>
+#include <net/pkt_cls.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+#include "mmc.h"
+
+#define DRIVER_NAME "rockchip_gmac_uio_drv"
+#define DRIVER_VERSION "0.1"
+#define STMMAC_ALIGN(x)	ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
+
+#define TC_DEFAULT 64
+#define DEFAULT_BUFSIZE 1536
+#define STMMAC_RX_COPYBREAK 256
+
+static int buf_sz = DEFAULT_BUFSIZE;
+static int tc = TC_DEFAULT;
+
+/**
+ * struct rockchip_gmac_uio_pdev_info - local information for the uio driver
+ *
+ * @dev: device pointer
+ * @ndev: network device pointer
+ * @name: uio name
+ * @uio: uio information
+ * @map_num: number of uio memory regions
+ */
+struct rockchip_gmac_uio_pdev_info {
+	struct device *dev;
+	struct net_device *ndev;
+	char name[16];
+	struct uio_info uio;
+	int map_num;
+};
+
+static int rockchip_gmac_uio_open(struct uio_info *info, struct inode *inode)
+{
+	return 0;
+}
+
+static int rockchip_gmac_uio_release(struct uio_info *info,
+				     struct inode *inode)
+{
+	return 0;
+}
+
+static int rockchip_gmac_uio_mmap(struct uio_info *info,
+				  struct vm_area_struct *vma)
+{
+	unsigned long pfn;
+	int ret;
+
+	pfn = (info->mem[vma->vm_pgoff].addr) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		pr_err("remap_pfn_range failed\n");
+
+	return ret;
+}
+
+/**
+ * uio_free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
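+	/* Only the descriptor rings allocated by this module are freed
+	 * here; the packet data buffers are expected to be managed by the
+	 * user-space (e.g. DPDK) poll-mode driver.
+	 */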
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_RX_SIZE * sizeof(struct dma_desc),
+					  rx_q->dma_rx, rx_q->dma_rx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_RX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  rx_q->dma_erx, rx_q->dma_rx_phy);
+	}
+}
+
+/**
+ * uio_free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_TX_SIZE * sizeof(struct dma_desc),
+					  tx_q->dma_tx, tx_q->dma_tx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_TX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  tx_q->dma_etx, tx_q->dma_tx_phy);
+	}
+}
+
+/**
+ * uio_alloc_dma_rx_desc_resources - alloc RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the DMA descriptor rings for the RX
+ * queues; the packet buffers themselves are left to user space.
+ */
+static int uio_alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		rx_q->queue_index = queue;
+		rx_q->priv_data = priv;
+
+		if (priv->extend_desc) {
+			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+							    DMA_RX_SIZE *
+							    sizeof(struct dma_extended_desc),
+							    &rx_q->dma_rx_phy,
+							    GFP_KERNEL);
+			if (!rx_q->dma_erx)
+				goto err_dma;
+
+		} else {
+			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+							   DMA_RX_SIZE *
+							   sizeof(struct dma_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
+			if (!rx_q->dma_rx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_rx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the DMA descriptor rings for the TX
+ * queues.
+ */
+static int uio_alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		tx_q->queue_index = queue;
+		tx_q->priv_data = priv;
+
+		if (priv->extend_desc) {
+			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+							    DMA_TX_SIZE *
+							    sizeof(struct dma_extended_desc),
+							    &tx_q->dma_tx_phy,
+							    GFP_KERNEL);
+			if (!tx_q->dma_etx)
+				goto err_dma;
+		} else {
+			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+							   DMA_TX_SIZE *
+							   sizeof(struct dma_desc),
+							   &tx_q->dma_tx_phy,
+							   GFP_KERNEL);
+			if (!tx_q->dma_tx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	uio_free_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the DMA descriptor rings for both the
+ * TX and RX paths.
+ */
+static int uio_alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* RX Allocation */
+	int ret = uio_alloc_dma_rx_desc_resources(priv);
+
+	if (ret)
+		return ret;
+
+	ret = uio_alloc_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * uio_free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void uio_free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the DMA RX descriptor rings */
+	uio_free_dma_rx_desc_resources(priv);
+
+	/* Release the DMA TX descriptor rings */
+	uio_free_dma_tx_desc_resources(priv);
+}
+
+/**
+ * uio_hw_fix_mac_speed - callback for speed selection
+ * @priv: driver private structure
+ * Description: on some platforms (e.g. ST), some HW system configuration
+ * registers have to be set according to the link speed negotiated.
+ */
+static inline void uio_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+	struct net_device *ndev = priv->dev;
+	struct phy_device *phydev = ndev->phydev;
+
+	if (likely(priv->plat->fix_mac_speed))
+		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
+}
+
+/**
+ * uio_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void uio_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+			 priv->pause, tx_cnt);
+}
+
+/**
+ * uio_adjust_link - adjusts the link parameters
+ * @dev: net device structure
+ * Description: this is the helper called by the physical abstraction layer
+ * drivers to communicate the phy link status. According to the speed and
+ * duplex, this driver can invoke registered glue-logic as well.
+ * It also invokes the eee initialization because that can be needed when
+ * switching between networks that are eee capable.
+ */
+static void uio_adjust_link(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
+	bool new_state = false;
+
+	if (!phydev)
+		return;
+
+	mutex_lock(&priv->lock);
+
+	if (phydev->link) {
+		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+
+		/* Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode.
+		 */
+		if (phydev->duplex != priv->oldduplex) {
+			new_state = true;
+			if (!phydev->duplex)
+				ctrl &= ~priv->hw->link.duplex;
+			else
+				ctrl |= priv->hw->link.duplex;
+			priv->oldduplex = phydev->duplex;
+		}
+		/* Flow Control operation */
+		if (phydev->pause)
+			uio_mac_flow_ctrl(priv, phydev->duplex);
+
+		if (phydev->speed != priv->speed) {
+			new_state = true;
+			ctrl &= ~priv->hw->link.speed_mask;
+			switch (phydev->speed) {
+			case SPEED_1000:
+				ctrl |= priv->hw->link.speed1000;
+				break;
+			case SPEED_100:
+				ctrl |= priv->hw->link.speed100;
+				break;
+			case SPEED_10:
+				ctrl |= priv->hw->link.speed10;
+				break;
+			default:
+				netif_warn(priv, link, priv->dev,
+					   "broken speed: %d\n", phydev->speed);
+				phydev->speed = SPEED_UNKNOWN;
+				break;
+			}
+			if (phydev->speed != SPEED_UNKNOWN)
+				uio_hw_fix_mac_speed(priv);
+			priv->speed = phydev->speed;
+		}
+
+		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+		if (!priv->oldlink) {
+			new_state = true;
+			priv->oldlink = true;
+		}
+	} else if (priv->oldlink) {
+		new_state = true;
+		priv->oldlink = false;
+		priv->speed = SPEED_UNKNOWN;
+		priv->oldduplex = DUPLEX_UNKNOWN;
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+
+	mutex_unlock(&priv->lock);
+
+	if (phydev->is_pseudo_fixed_link)
+		/* Stop the PHY layer from calling the hook to adjust the
+		 * link in case a switch is attached to the stmmac driver.
+		 */
+		phydev->irq = PHY_IGNORE_INTERRUPT;
+}
+
+/**
+ * rockchip_gmac_uio_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int rockchip_gmac_uio_init_phy(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	struct phy_device *phydev;
+	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+	int interface = priv->plat->interface;
+	int max_speed = priv->plat->max_speed;
+
+	priv->oldlink = false;
+	priv->speed = SPEED_UNKNOWN;
+	priv->oldduplex = DUPLEX_UNKNOWN;
+
+	if (priv->plat->integrated_phy_power)
+		priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
+
+	if (priv->mii)
+		stmmac_mdio_reset(priv->mii);
+
+	if (priv->plat->phy_node) {
+		phydev = of_phy_connect(dev, priv->plat->phy_node,
+					&uio_adjust_link, 0, interface);
+	} else {
+		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+			 priv->plat->bus_id);
+
+		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+			 priv->plat->phy_addr);
+		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
+			   phy_id_fmt);
+
+		phydev = phy_connect(dev, phy_id_fmt, &uio_adjust_link,
+				     interface);
+	}
+
+	if (IS_ERR_OR_NULL(phydev)) {
+		netdev_err(priv->dev, "Could not attach to PHY\n");
+		if (!phydev)
+			return -ENODEV;
+
+		return PTR_ERR(phydev);
+	}
+
+	/* Stop advertising 1000BASE capability if interface is not GMII */
+	if (interface == PHY_INTERFACE_MODE_MII ||
+	    interface == PHY_INTERFACE_MODE_RMII ||
+	    (max_speed < 1000 && max_speed > 0))
+		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+					 SUPPORTED_1000baseT_Full);
+
+	/* Half-duplex mode is not supported with multiqueue;
+	 * half-duplex can only work with a single queue.
+	 */
+	if (tx_cnt > 1)
+		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+				       SUPPORTED_100baseT_Half |
+				       SUPPORTED_10baseT_Half);
+
+	/* Broken HW is sometimes missing the pull-up resistor on the
+	 * MDIO line, which results in reads to non-existent devices returning
+	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+	 * device as well.
+	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
+	 */
+	if (!priv->plat->phy_node && phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+
+	/* uio_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+	 * subsequent PHY polling, make sure we force a link transition if
+	 * we have a UP/DOWN/UP transition
+	 */
+	if (phydev->is_pseudo_fixed_link)
+		phydev->irq = PHY_POLL;
+
+	phy_attached_info(phydev);
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * in case these are not passed, a default is kept for the MAC or GMAC.
+ */
+static int rockchip_gmac_uio_init_dma_engine(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	struct stmmac_rx_queue *rx_q;
+	struct stmmac_tx_queue *tx_q;
+	u32 chan = 0;
+	int atds = 0;
+	int ret = 0;
+
+	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+		dev_err(priv->device, "Invalid DMA configuration\n");
+		return -EINVAL;
+	}
+
+	if (priv->extend_desc && priv->mode == STMMAC_RING_MODE)
+		atds = 1;
+
+	ret = stmmac_reset(priv, priv->ioaddr);
+	if (ret) {
+		dev_err(priv->device, "Failed to reset the dma\n");
+		return ret;
+	}
+
+	/* DMA Configuration */
+	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+	if (priv->plat->axi)
+		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+	/* DMA CSR Channel configuration */
+	for (chan = 0; chan < dma_csr_ch; chan++)
+		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+	/* DMA RX Channel Configuration */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		rx_q = &priv->rx_queue[chan];
+
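+		/* Hand the ring base address to the DMA for this channel,
+		 * then publish an initial tail pointer one full ring past
+		 * the base so that all DMA_RX_SIZE descriptors lie inside
+		 * the head/tail window.
+		 */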
+		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    rx_q->dma_rx_phy, chan);
+
+		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+				     (DMA_RX_SIZE * sizeof(struct dma_desc));
+		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+				       rx_q->rx_tail_addr, chan);
+	}
+
+	/* DMA TX Channel Configuration */
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		tx_q = &priv->tx_queue[chan];
+
+		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+				    tx_q->dma_tx_phy, chan);
+
+		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+				       tx_q->tx_tail_addr, chan);
+	}
+
+	return ret;
+}
+
+static void uio_set_rings_length(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
+	/* set TX ring length */
+	for (chan = 0; chan < tx_channels_count; chan++)
+		stmmac_set_tx_ring_len(priv, priv->ioaddr,
+				       (DMA_TX_SIZE - 1), chan);
+
+	/* set RX ring length */
+	for (chan = 0; chan < rx_channels_count; chan++)
+		stmmac_set_rx_ring_len(priv, priv->ioaddr,
+				       (DMA_RX_SIZE - 1), chan);
+}
+
+/**
+ * uio_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting TX queues weight
+ */
+static void uio_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 weight;
+	u32 queue;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		weight = priv->plat->tx_queues_cfg[queue].weight;
+		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+	}
+}
+
+/**
+ * uio_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void uio_configure_cbs(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 mode_to_use;
+	u32 queue;
+
+	/* queue 0 is reserved for legacy traffic */
+	for (queue = 1; queue < tx_queues_count; queue++) {
+		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+		if (mode_to_use == MTL_QUEUE_DCB)
+			continue;
+
+		stmmac_config_cbs(priv, priv->hw,
+				  priv->plat->tx_queues_cfg[queue].send_slope,
+				  priv->plat->tx_queues_cfg[queue].idle_slope,
+				  priv->plat->tx_queues_cfg[queue].high_credit,
+				  priv->plat->tx_queues_cfg[queue].low_credit,
+				  queue);
+	}
+}
+
+/**
+ * uio_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void uio_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 chan;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		chan = priv->plat->rx_queues_cfg[queue].chan;
+		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void uio_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		if (!priv->plat->rx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->rx_queues_cfg[queue].prio;
+		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void uio_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+	u32 prio;
+
+	for (queue = 0; queue < tx_queues_count; queue++) {
+		if (!priv->plat->tx_queues_cfg[queue].use_prio)
+			continue;
+
+		prio = priv->plat->tx_queues_cfg[queue].prio;
+		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+	}
+}
+
+/**
+ * uio_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void uio_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	u8 packet;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		/* no specific packet type routing specified for the queue */
+		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+			continue;
+
+		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+	}
+}
+
+/**
+ * uio_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void uio_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	int queue;
+	u8 mode;
+
+	for (queue = 0; queue < rx_queues_count; queue++) {
+		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void rockchip_gmac_uio_mtl_configuration(struct stmmac_priv *priv)
+{
+	u32 rx_queues_count = priv->plat->rx_queues_to_use;
+	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+	if (tx_queues_count > 1)
+		uio_set_tx_queue_weight(priv);
+
+	/* Configure MTL RX algorithms */
+	if (rx_queues_count > 1)
+		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+					      priv->plat->rx_sched_algorithm);
+
+	/* Configure MTL TX algorithms */
+	if (tx_queues_count > 1)
+		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+					      priv->plat->tx_sched_algorithm);
+
+	/* Configure CBS in AVB TX queues */
+	if (tx_queues_count > 1)
+		uio_configure_cbs(priv);
+
+	/* Map RX MTL to DMA channels */
+	uio_rx_queue_dma_chan_map(priv);
+
+	/* Enable MAC RX Queues */
+	uio_mac_enable_rx_queues(priv);
+
+	/* Set RX priorities */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_prio(priv);
+
+	/* Set TX priorities */
+	if (tx_queues_count > 1)
+		uio_mac_config_tx_queues_prio(priv);
+
+	/* Set RX routing */
+	if (rx_queues_count > 1)
+		uio_mac_config_rx_queues_routing(priv);
+}
+
+static void uio_safety_feat_configuration(struct stmmac_priv *priv)
+{
+	if (priv->dma_cap.asp) {
+		netdev_info(priv->dev, "Enabling Safety Features\n");
+		stmmac_safety_feat_config(priv, priv->ioaddr,
+					  priv->dma_cap.asp);
+	} else {
+		netdev_info(priv->dev, "No Safety Features support found\n");
+	}
+}
+
+/**
+ * uio_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void uio_dma_operation_mode(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	int rxfifosz = priv->plat->rx_fifo_size;
+	int txfifosz = priv->plat->tx_fifo_size;
+	u32 txmode = 0;
+	u32 rxmode = 0;
+	u32 chan = 0;
+	u8 qmode = 0;
+
+	if (rxfifosz == 0)
+		rxfifosz = priv->dma_cap.rx_fifo_size;
+	if (txfifosz == 0)
+		txfifosz = priv->dma_cap.tx_fifo_size;
+
+	/* Adjust for real per queue fifo size */
+	rxfifosz /= rx_channels_count;
+	txfifosz /= tx_channels_count;
+
+	if (priv->plat->force_thresh_dma_mode) {
+		txmode = tc;
+		rxmode = tc;
+	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+		/* In case of GMAC, SF mode can be enabled
+		 * to perform the TX COE in HW. This depends on:
+		 * 1) TX COE if actually supported
+		 * 2) There is no bugged Jumbo frame support
+		 *    that needs to not insert csum in the TDES.
+		 */
+		txmode = SF_DMA_MODE;
+		rxmode = SF_DMA_MODE;
+		priv->xstats.threshold = SF_DMA_MODE;
+	} else {
+		txmode = tc;
+		rxmode = SF_DMA_MODE;
+	}
+
+	/* configure all channels */
+	for (chan = 0; chan < rx_channels_count; chan++) {
+		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+				   rxfifosz, qmode);
+		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+				      chan);
+	}
+
+	for (chan = 0; chan < tx_channels_count; chan++) {
+		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+				   txfifosz, qmode);
+	}
+}
+
+/**
+ * rockchip_gmac_uio_mmc_setup: setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq, in fact, the counters are managed in SW.
+ */
+static void rockchip_gmac_uio_mmc_setup(struct stmmac_priv *priv)
+{
+	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+	dwmac_mmc_intr_all_mask(priv->mmcaddr);
+
+	if (priv->dma_cap.rmon) {
+		dwmac_mmc_ctrl(priv->mmcaddr, mode);
+		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+	} else {
+		netdev_info(priv->dev, "No MAC Management Counters available\n");
+	}
+}
+
+/**
+ * rockchip_gmac_uio_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
+ * Description:
+ * this is the main function to setup the HW in a usable state because the
+ * dma engine is reset, the core registers are configured (e.g. AXI,
+ * Checksum features, timers). The DMA is ready to start receiving and
+ * transmitting.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int rockchip_gmac_uio_hw_setup(struct net_device *dev, bool init_ptp)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/* DMA initialization and SW reset */
+	ret = rockchip_gmac_uio_init_dma_engine(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+			   __func__);
+		return ret;
+	}
+
+	/* Copy the MAC addr into the HW */
+	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+	/* PS and related bits will be programmed according to the speed */
+	if (priv->hw->pcs) {
+		int speed = priv->plat->mac_port_sel_speed;
+
+		if (speed == SPEED_10 || speed == SPEED_100 ||
+		    speed == SPEED_1000) {
+			priv->hw->ps = speed;
+		} else {
+			dev_warn(priv->device, "invalid port speed\n");
+			priv->hw->ps = 0;
+		}
+	}
+
+	/* Initialize the MAC Core */
+	stmmac_core_init(priv, priv->hw, dev);
+
+	/* Initialize MTL */
+	rockchip_gmac_uio_mtl_configuration(priv);
+
+	/* Initialize Safety Features */
+	uio_safety_feat_configuration(priv);
+
+	ret = stmmac_rx_ipc(priv, priv->hw);
+	if (!ret) {
+		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+		priv->hw->rx_csum = 0;
+	}
+
+	/* Enable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, true);
+
+	/* Set the HW DMA mode and the COE */
+	uio_dma_operation_mode(priv);
+
+	rockchip_gmac_uio_mmc_setup(priv);
+
+	if (priv->hw->pcs)
+		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+
+	/* set TX and RX rings length */
+	uio_set_rings_length(priv);
+
+	return ret;
+}
+
+/**
+ * uio_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int uio_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+	    priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI) {
+		ret = rockchip_gmac_uio_init_phy(dev);
+		if (ret) {
+			netdev_err(priv->dev,
+				   "%s: Cannot attach to PHY (error: %d)\n",
+				   __func__, ret);
+			return ret;
+		}
+	}
+
+	/* Extra statistics */
+	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+	priv->xstats.threshold = tc;
+
+	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+	ret = uio_alloc_dma_desc_resources(priv);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+			   __func__);
+		goto dma_desc_error;
+	}
+
+	ret = rockchip_gmac_uio_hw_setup(dev, true);
+	if (ret < 0) {
+		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+		goto init_error;
+	}
+
+	if (dev->phydev)
+		phy_start(dev->phydev);
+
+	return 0;
+
+init_error:
+	uio_free_dma_desc_resources(priv);
+dma_desc_error:
+	if (dev->phydev)
+		phy_disconnect(dev->phydev);
+	return ret;
+}
+
+/**
+ * uio_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int uio_release(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Stop and disconnect the PHY */
+	if (dev->phydev) {
+		phy_stop(dev->phydev);
+		phy_disconnect(dev->phydev);
+		if (priv->plat->integrated_phy_power)
+			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
+							 false);
+	}
+
+	/* Release and free the Rx/Tx resources */
+	uio_free_dma_desc_resources(priv);
+
+	/* Disable the MAC Rx/Tx */
+	stmmac_mac_set(priv, priv->ioaddr, false);
+
+	netif_carrier_off(dev);
+
+	return 0;
+}
+
+/**
+ * rockchip_gmac_uio_probe() - platform driver probe routine
+ * - register uio devices filled with memory maps retrieved
+ *   from the device tree
+ */
+static int rockchip_gmac_uio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node, *mac_node;
+	struct rockchip_gmac_uio_pdev_info *pdev_info;
+	struct net_device *netdev;
+	struct stmmac_priv *priv;
+	struct uio_info *uio;
+	struct resource *res;
+	int err = 0;
+
+	pdev_info = devm_kzalloc(dev, sizeof(struct rockchip_gmac_uio_pdev_info),
+				 GFP_KERNEL);
+	if (!pdev_info)
+		return -ENOMEM;
+
+	uio = &pdev_info->uio;
+	pdev_info->dev = dev;
+	mac_node = of_parse_phandle(np, "rockchip,ethernet", 0);
+	if (!mac_node)
+		return -ENODEV;
+
+	if (of_device_is_available(mac_node)) {
+		netdev = of_find_net_device_by_node(mac_node);
+		of_node_put(mac_node);
+		if (!netdev)
+			return -ENODEV;
+	} else {
+		of_node_put(mac_node);
+		return -EINVAL;
+	}
+
+	pdev_info->ndev = netdev;
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
+
+	rtnl_lock();
+	err = uio_open(netdev);
+	if (err) {
+		rtnl_unlock();
+		dev_err(dev, "Failed to open stmmac resource: %d\n", err);
+		return err;
+	}
+	rtnl_unlock();
+
+	priv = netdev_priv(netdev);
+	snprintf(pdev_info->name, sizeof(pdev_info->name), "uio_%s",
"uio_%s", 1177*4882a593Smuzhiyun+ netdev->name); 1178*4882a593Smuzhiyun+ uio->name = pdev_info->name; 1179*4882a593Smuzhiyun+ uio->version = DRIVER_VERSION; 1180*4882a593Smuzhiyun+ 1181*4882a593Smuzhiyun+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1182*4882a593Smuzhiyun+ if (!res) 1183*4882a593Smuzhiyun+ return -ENODEV; 1184*4882a593Smuzhiyun+ 1185*4882a593Smuzhiyun+ uio->mem[0].name = "eth_regs"; 1186*4882a593Smuzhiyun+ uio->mem[0].addr = res->start & PAGE_MASK; 1187*4882a593Smuzhiyun+ uio->mem[0].size = PAGE_ALIGN(resource_size(res)); 1188*4882a593Smuzhiyun+ uio->mem[0].memtype = UIO_MEM_PHYS; 1189*4882a593Smuzhiyun+ 1190*4882a593Smuzhiyun+ uio->mem[1].name = "eth_rx_bd"; 1191*4882a593Smuzhiyun+ uio->mem[1].addr = priv->rx_queue[0].dma_rx_phy; 1192*4882a593Smuzhiyun+ uio->mem[1].size = DMA_RX_SIZE * sizeof(struct dma_desc); 1193*4882a593Smuzhiyun+ uio->mem[1].memtype = UIO_MEM_PHYS; 1194*4882a593Smuzhiyun+ 1195*4882a593Smuzhiyun+ uio->mem[2].name = "eth_tx_bd"; 1196*4882a593Smuzhiyun+ uio->mem[2].addr = priv->tx_queue[0].dma_tx_phy; 1197*4882a593Smuzhiyun+ uio->mem[2].size = DMA_TX_SIZE * sizeof(struct dma_desc); 1198*4882a593Smuzhiyun+ uio->mem[2].memtype = UIO_MEM_PHYS; 1199*4882a593Smuzhiyun+ 1200*4882a593Smuzhiyun+ uio->open = rockchip_gmac_uio_open; 1201*4882a593Smuzhiyun+ uio->release = rockchip_gmac_uio_release; 1202*4882a593Smuzhiyun+ /* Custom mmap function. */ 1203*4882a593Smuzhiyun+ uio->mmap = rockchip_gmac_uio_mmap; 1204*4882a593Smuzhiyun+ uio->priv = pdev_info; 1205*4882a593Smuzhiyun+ 1206*4882a593Smuzhiyun+ err = uio_register_device(dev, uio); 1207*4882a593Smuzhiyun+ if (err) { 1208*4882a593Smuzhiyun+ dev_err(dev, "Failed to register uio device: %d\n", err); 1209*4882a593Smuzhiyun+ return err; 1210*4882a593Smuzhiyun+ } 1211*4882a593Smuzhiyun+ 1212*4882a593Smuzhiyun+ pdev_info->map_num = 3; 1213*4882a593Smuzhiyun+ 1214*4882a593Smuzhiyun+ dev_info(dev, "Registered %s uio devices, %d register maps attached\n", 1215*4882a593Smuzhiyun+ pdev_info->name, pdev_info->map_num); 1216*4882a593Smuzhiyun+ 1217*4882a593Smuzhiyun+ platform_set_drvdata(pdev, pdev_info); 1218*4882a593Smuzhiyun+ 1219*4882a593Smuzhiyun+ return 0; 1220*4882a593Smuzhiyun+} 1221*4882a593Smuzhiyun+ 1222*4882a593Smuzhiyun+/** 1223*4882a593Smuzhiyun+ * rockchip_gmac_uio_remove() - ROCKCHIP ETH UIO platform driver release 1224*4882a593Smuzhiyun+ * routine - unregister uio devices 1225*4882a593Smuzhiyun+ */ 1226*4882a593Smuzhiyun+static int rockchip_gmac_uio_remove(struct platform_device *pdev) 1227*4882a593Smuzhiyun+{ 1228*4882a593Smuzhiyun+ struct rockchip_gmac_uio_pdev_info *pdev_info = 1229*4882a593Smuzhiyun+ platform_get_drvdata(pdev); 1230*4882a593Smuzhiyun+ struct net_device *netdev; 1231*4882a593Smuzhiyun+ 1232*4882a593Smuzhiyun+ if (!pdev_info) 1233*4882a593Smuzhiyun+ return -EINVAL; 1234*4882a593Smuzhiyun+ 1235*4882a593Smuzhiyun+ netdev = pdev_info->ndev; 1236*4882a593Smuzhiyun+ 1237*4882a593Smuzhiyun+ uio_unregister_device(&pdev_info->uio); 1238*4882a593Smuzhiyun+ 1239*4882a593Smuzhiyun+ if (netdev) { 1240*4882a593Smuzhiyun+ rtnl_lock(); 1241*4882a593Smuzhiyun+ uio_release(netdev); 1242*4882a593Smuzhiyun+ rtnl_unlock(); 1243*4882a593Smuzhiyun+ } 1244*4882a593Smuzhiyun+ 1245*4882a593Smuzhiyun+ platform_set_drvdata(pdev, NULL); 1246*4882a593Smuzhiyun+ 1247*4882a593Smuzhiyun+ if (netdev) { 1248*4882a593Smuzhiyun+ rtnl_lock(); 1249*4882a593Smuzhiyun+ dev_open(netdev); 1250*4882a593Smuzhiyun+ rtnl_unlock(); 1251*4882a593Smuzhiyun+ } 1252*4882a593Smuzhiyun+ 1253*4882a593Smuzhiyun+ return 0; 
+}
+
+static const struct of_device_id rockchip_gmac_uio_of_match[] = {
+	{ .compatible = "rockchip,uio-gmac", },
+	{ }
+};
+
+static struct platform_driver rockchip_gmac_uio_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+		.of_match_table = rockchip_gmac_uio_of_match,
+	},
+	.probe = rockchip_gmac_uio_probe,
+	.remove = rockchip_gmac_uio_remove,
+};
+
+module_platform_driver(rockchip_gmac_uio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ROCKCHIP");
+MODULE_DESCRIPTION("ROCKCHIP GMAC UIO Driver");
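
Usage sketch: the UIO helper binds against a dedicated device tree node
that carries a "rockchip,ethernet" phandle to the stmmac MAC it wraps,
and whose first reg entry becomes the "eth_regs" UIO map. A minimal,
hypothetical fragment (node names and the register address/size are
illustrative, not mandated by the driver):

	gmac_uio: gmac-uio@fe2a0000 {
		compatible = "rockchip,uio-gmac";
		/* GMAC register block; exported as UIO map 0 ("eth_regs") */
		reg = <0x0 0xfe2a0000 0x0 0x10000>;
		/* phandle to the stmmac netdev taken over by user space */
		rockchip,ethernet = <&gmac0>;
	};

On probe the driver closes the kernel netdev, allocates the descriptor
rings, and exposes three UIO maps ("eth_regs", "eth_rx_bd", "eth_tx_bd")
through /dev/uioX for a user-space poll-mode driver.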