// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the MAC 10/100 on-chip Ethernet controller,
  currently tested on the ST boards based on the STb7109 and STx7200 SoCs.

  DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
  this code.

  This file contains the functions that handle the DMA.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <asm/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"

static void dwmac100_dma_init(void __iomem *ioaddr,
			      struct stmmac_dma_cfg *dma_cfg, int atds)
{
	/* Enable Application Access by writing to DMA CSR0 */
	writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
	       ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
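
/*
 * Illustrative sketch, not part of the driver: the stmmac core reaches the
 * hook above through dwmac100_dma_ops.init when it brings up the DMA engine,
 * roughly as
 *
 *	dwmac100_dma_ops.init(priv->ioaddr, priv->plat->dma_cfg, 0);
 *
 * so the burst length programmed into the CSR0 PBL field comes from the
 * platform dma_cfg; the atds argument is not used by this 10/100 core.
 */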

static void dwmac100_dma_init_rx(void __iomem *ioaddr,
				 struct stmmac_dma_cfg *dma_cfg,
				 dma_addr_t dma_rx_phy, u32 chan)
{
	/* The RX descriptor base address list must be written into DMA CSR3 */
	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}

static void dwmac100_dma_init_tx(void __iomem *ioaddr,
				 struct stmmac_dma_cfg *dma_cfg,
				 dma_addr_t dma_tx_phy, u32 chan)
{
	/* The TX descriptor base address list must be written into DMA CSR4 */
	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
}
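
/*
 * Note on the two helpers above (editorial assumption, not from the original
 * file): the 10/100 DMA has a single RX and a single TX channel, so the chan
 * argument supplied by the core is ignored and the descriptor ring bases go
 * straight into CSR3/CSR4. A minimal sketch of the expected calls, where
 * rx_ring_phys/tx_ring_phys stand in for the rings' DMA addresses:
 *
 *	dwmac100_dma_ops.init_rx_chan(ioaddr, dma_cfg, rx_ring_phys, 0);
 *	dwmac100_dma_ops.init_tx_chan(ioaddr, dma_cfg, tx_ring_phys, 0);
 */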

/* Store and Forward capability is not used at all.
 *
 * The transmit threshold can be programmed by setting the TTC bits in the DMA
 * control register.
 */
static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
					   u32 channel, int fifosz, u8 qmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (mode <= 32)
		csr6 |= DMA_CONTROL_TTC_32;
	else if (mode <= 64)
		csr6 |= DMA_CONTROL_TTC_64;
	else
		csr6 |= DMA_CONTROL_TTC_128;

	writel(csr6, ioaddr + DMA_CONTROL);
}
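
/*
 * Minimal usage sketch (assumption about the caller, not part of this file):
 * the core passes the TX threshold in bytes, e.g.
 *
 *	dwmac100_dma_ops.dma_tx_mode(ioaddr, 64, 0, 0, 0);
 *
 * which takes the "mode <= 64" branch above and sets DMA_CONTROL_TTC_64 in
 * CSR6; anything larger falls back to the 128-byte threshold, and
 * store-and-forward is never selected, as noted above.
 */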

static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
		reg_space[DMA_BUS_MODE / 4 + i] =
			readl(ioaddr + DMA_BUS_MODE + i * 4);

	reg_space[DMA_CUR_TX_BUF_ADDR / 4] =
		readl(ioaddr + DMA_CUR_TX_BUF_ADDR);
	reg_space[DMA_CUR_RX_BUF_ADDR / 4] =
		readl(ioaddr + DMA_CUR_RX_BUF_ADDR);
}
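
/*
 * Note (assumption about the caller, not from this file): dwmac100_dump_dma_regs()
 * above fills reg_space, the buffer the stmmac ethtool code hands down for
 * register dumps (ethtool -d), so each DMA CSR lands at its native word
 * offset, e.g. CSR0 at reg_space[DMA_BUS_MODE / 4].
 */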

/* The DMA controller has two counters to track the number of missed frames. */
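/*
 * Editorial note on the assumed CSR8 layout (consistent with the shift and
 * saturation values used below and with the field masks from dwmac_dma.h):
 * the missed-frame counter occupies the low 16 bits with an overflow flag,
 * and the FIFO overflow counter sits above it starting at bit 17 with its
 * own overflow flag. Worked example: if CSR8 reads 0x00240007, the overflow
 * counter is (0x00240007 & DMA_MISSED_FRAME_OVE_CNTR) >> 17 = 18 and the
 * missed-frame count is 0x00240007 & DMA_MISSED_FRAME_M_CNTR = 7.
 */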
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
				       void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);

	if (unlikely(csr8)) {
		if (csr8 & DMA_MISSED_FRAME_OVE) {
			stats->rx_over_errors += 0x800;
			x->rx_overflow_cntr += 0x800;
		} else {
			unsigned int ove_cntr;

			ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
			stats->rx_over_errors += ove_cntr;
			x->rx_overflow_cntr += ove_cntr;
		}

		if (csr8 & DMA_MISSED_FRAME_OVE_M) {
			stats->rx_missed_errors += 0xffff;
			x->rx_missed_cntr += 0xffff;
		} else {
			unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);

			stats->rx_missed_errors += miss_f;
			x->rx_missed_cntr += miss_f;
		}
	}
}

const struct stmmac_dma_ops dwmac100_dma_ops = {
	.reset = dwmac_dma_reset,
	.init = dwmac100_dma_init,
	.init_rx_chan = dwmac100_dma_init_rx,
	.init_tx_chan = dwmac100_dma_init_tx,
	.dump_regs = dwmac100_dump_dma_regs,
	.dma_tx_mode = dwmac100_dma_operation_mode_tx,
	.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
};
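
/*
 * Illustrative sketch, not part of the driver: the rest of the stmmac stack
 * never calls the functions above directly, it dispatches through this ops
 * table, roughly as
 *
 *	const struct stmmac_dma_ops *ops = &dwmac100_dma_ops;
 *
 *	ops->init(ioaddr, dma_cfg, 0);	// program CSR0/CSR7 as shown above
 *	ops->start_tx(ioaddr, 0);	// start the TX DMA on channel 0
 *
 * The generic dwmac_dma_* helpers shared with the GMAC cores provide the
 * reset, start/stop and interrupt entries, while the dwmac100_* functions
 * above cover what is specific to the 10/100 DMA.
 */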