// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  This contains the functions to handle the DMA.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

15*4882a593Smuzhiyun #include <asm/io.h>
16*4882a593Smuzhiyun #include "dwmac1000.h"
17*4882a593Smuzhiyun #include "dwmac_dma.h"
18*4882a593Smuzhiyun
/* Program the AXI bus-mode register from the platform-provided
 * stmmac_axi configuration: LPI behaviour, read/write outstanding
 * request limits and the set of allowed burst lengths.
 */
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	static const struct {
		int len;	/* burst length in beats */
		u32 flag;	/* corresponding DMA_AXI_BLENx bit */
	} blen_map[] = {
		{ 256, DMA_AXI_BLEN256 },
		{ 128, DMA_AXI_BLEN128 },
		{ 64, DMA_AXI_BLEN64 },
		{ 32, DMA_AXI_BLEN32 },
		{ 16, DMA_AXI_BLEN16 },
		{ 8, DMA_AXI_BLEN8 },
		{ 4, DMA_AXI_BLEN4 },
	};
	u32 bus_mode = readl(ioaddr + DMA_AXI_BUS_MODE);
	int i, j;

	pr_info("dwmac1000: Master AXI performs %s burst length\n",
		!(bus_mode & DMA_AXI_UNDEF) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		bus_mode |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		bus_mode |= DMA_AXI_LPI_XIT_FRM;

	/* Replace the write/read outstanding request limit fields. */
	bus_mode &= ~DMA_AXI_WR_OSR_LMT;
	bus_mode |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
		    DMA_AXI_WR_OSR_LMT_SHIFT;

	bus_mode &= ~DMA_AXI_RD_OSR_LMT;
	bus_mode |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
		    DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).  Translate each requested length into its BLEN bit.
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		for (j = 0; j < ARRAY_SIZE(blen_map); j++) {
			if (axi->axi_blen[i] == blen_map[j].len) {
				bus_mode |= blen_map[j].flag;
				break;
			}
		}
	}

	writel(bus_mode, ioaddr + DMA_AXI_BUS_MODE);
}
72*4882a593Smuzhiyun
/* One-time DMA bus-mode setup: PBL values, burst options, descriptor
 * format selection, then program the default interrupt mask (CSR7).
 */
static void dwmac1000_dma_init(void __iomem *ioaddr,
			       struct stmmac_dma_cfg *dma_cfg, int atds)
{
	int tx_pbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rx_pbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
	u32 bus_mode = readl(ioaddr + DMA_BUS_MODE);

	/* Set the DMA PBL (Programmable Burst Length) multiplier mode.
	 *
	 * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
	 * post 3.5 the mode bit acts as 8xPBL.
	 */
	if (dma_cfg->pblx8)
		bus_mode |= DMA_BUS_MODE_MAXPBL;

	/* Use separate PBL values for the TX and RX paths (USP). */
	bus_mode |= DMA_BUS_MODE_USP;
	bus_mode &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	bus_mode |= (tx_pbl << DMA_BUS_MODE_PBL_SHIFT) |
		    (rx_pbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Fixed burst mode */
	if (dma_cfg->fixed_burst)
		bus_mode |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when FB is set */
	if (dma_cfg->mixed_burst)
		bus_mode |= DMA_BUS_MODE_MB;

	/* Alternate (enhanced) descriptor size */
	if (atds)
		bus_mode |= DMA_BUS_MODE_ATDS;

	if (dma_cfg->aal)
		bus_mode |= DMA_BUS_MODE_AAL;

	writel(bus_mode, ioaddr + DMA_BUS_MODE);

	/* Program the default interrupt mask into CSR7 (DMA_INTR_ENA). */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
112*4882a593Smuzhiyun
/* Program the RX descriptor ring base address.  On this core the list
 * base lives in DMA CSR3; dma_cfg and chan are unused here (single
 * DMA channel register set).
 */
static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_rx_phy, u32 chan)
{
	writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}
120*4882a593Smuzhiyun
/* Program the TX descriptor ring base address.  On this core the list
 * base lives in DMA CSR4; dma_cfg and chan are unused here (single
 * DMA channel register set).
 */
static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
				  struct stmmac_dma_cfg *dma_cfg,
				  dma_addr_t dma_tx_phy, u32 chan)
{
	writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
}
128*4882a593Smuzhiyun
/* Update the flow-control fields of the given CSR6 value based on the
 * RX FIFO size and return the result.  Leave flow control disabled if
 * the receive FIFO size is less than 4K (or reported as 0); otherwise
 * send XOFF when the FIFO is 1K less than full and XON when 2K less
 * than full.
 */
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
{
	/* Clear the activation/deactivation threshold fields first. */
	csr6 &= ~(DMA_CONTROL_RFA_MASK | DMA_CONTROL_RFD_MASK);

	if (rxfifosz < 4096) {
		pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
			 rxfifosz);
		return csr6 & ~DMA_CONTROL_EFC;
	}

	return csr6 | DMA_CONTROL_EFC | RFA_FULL_MINUS_1K | RFD_FULL_MINUS_2K;
}
149*4882a593Smuzhiyun
/* Set the RX operation mode: store-and-forward for SF_DMA_MODE,
 * otherwise cut-through with the smallest threshold step >= mode.
 * Flow control is (re)configured from the RX FIFO size on every call.
 * channel and qmode are unused on this single-channel core.
 */
static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 ctrl = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		ctrl |= DMA_CONTROL_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
		ctrl &= ~DMA_CONTROL_RSF;
		/* Clear the threshold field, then pick the step. */
		ctrl &= DMA_CONTROL_TC_RX_MASK;
		if (mode <= 32)
			ctrl |= DMA_CONTROL_RTC_32;
		else if (mode <= 64)
			ctrl |= DMA_CONTROL_RTC_64;
		else if (mode <= 96)
			ctrl |= DMA_CONTROL_RTC_96;
		else
			ctrl |= DMA_CONTROL_RTC_128;
	}

	/* Configure flow control based on rx fifo size */
	ctrl = dwmac1000_configure_fc(ctrl, fifosz);

	writel(ctrl, ioaddr + DMA_CONTROL);
}
177*4882a593Smuzhiyun
/* Set the TX operation mode: store-and-forward (plus operate-on-second
 * -frame) for SF_DMA_MODE, otherwise cut-through with the smallest
 * threshold step >= mode.  channel, fifosz and qmode are unused on
 * this single-channel core.
 */
static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
					    u32 channel, int fifosz, u8 qmode)
{
	u32 ctrl = readl(ioaddr + DMA_CONTROL);

	if (mode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		ctrl |= DMA_CONTROL_TSF;
		/* Operating on second frame increase the performance
		 * especially when transmit store-and-forward is used.
		 */
		ctrl |= DMA_CONTROL_OSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
		ctrl &= ~DMA_CONTROL_TSF;
		/* Clear the threshold field, then set the transmit
		 * threshold step.
		 */
		ctrl &= DMA_CONTROL_TC_TX_MASK;
		if (mode <= 32)
			ctrl |= DMA_CONTROL_TTC_32;
		else if (mode <= 64)
			ctrl |= DMA_CONTROL_TTC_64;
		else if (mode <= 128)
			ctrl |= DMA_CONTROL_TTC_128;
		else if (mode <= 192)
			ctrl |= DMA_CONTROL_TTC_192;
		else
			ctrl |= DMA_CONTROL_TTC_256;
	}

	writel(ctrl, ioaddr + DMA_CONTROL);
}
210*4882a593Smuzhiyun
/* Snapshot the DMA register block into reg_space (indexed by register
 * offset / 4), skipping words 12-17 which are not captured by this
 * dump.
 */
static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
	int i;

	for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) {
		if (i >= 12 && i <= 17)
			continue;
		reg_space[DMA_BUS_MODE / 4 + i] =
			readl(ioaddr + DMA_BUS_MODE + i * 4);
	}
}
220*4882a593Smuzhiyun
/* Decode the DMA HW feature register into dma_cap.
 *
 * Returns 0 on success, or -EOPNOTSUPP when the register reads back as
 * 0x00000000 — the value seen on old hardware that does not implement
 * this register.
 */
static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 cap = readl(ioaddr + DMA_HW_FEATURE);

	if (!cap)
		return -EOPNOTSUPP;

	/* Link speed / duplex capabilities */
	dma_cap->mbps_10_100 = (cap & DMA_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (cap & DMA_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (cap & DMA_HW_FEAT_HDSEL) >> 2;
	/* Address filtering and PCS */
	dma_cap->hash_filter = (cap & DMA_HW_FEAT_HASHSEL) >> 4;
	dma_cap->multi_addr = (cap & DMA_HW_FEAT_ADDMAC) >> 5;
	dma_cap->pcs = (cap & DMA_HW_FEAT_PCSSEL) >> 6;
	dma_cap->sma_mdio = (cap & DMA_HW_FEAT_SMASEL) >> 8;
	/* Wake-up: remote wake-up frame and magic packet */
	dma_cap->pmt_remote_wake_up = (cap & DMA_HW_FEAT_RWKSEL) >> 9;
	dma_cap->pmt_magic_frame = (cap & DMA_HW_FEAT_MGKSEL) >> 10;
	/* MMC */
	dma_cap->rmon = (cap & DMA_HW_FEAT_MMCSEL) >> 11;
	/* IEEE 1588-2002 */
	dma_cap->time_stamp = (cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (cap & DMA_HW_FEAT_EEESEL) >> 14;
	dma_cap->av = (cap & DMA_HW_FEAT_AVSEL) >> 15;
	/* TX and RX csum */
	dma_cap->tx_coe = (cap & DMA_HW_FEAT_TXCOESEL) >> 16;
	dma_cap->rx_coe_type1 = (cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
	dma_cap->rx_coe_type2 = (cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
	dma_cap->rxfifo_over_2048 = (cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
	/* TX and RX number of channels */
	dma_cap->number_rx_channel = (cap & DMA_HW_FEAT_RXCHCNT) >> 20;
	dma_cap->number_tx_channel = (cap & DMA_HW_FEAT_TXCHCNT) >> 22;
	/* Alternate (enhanced) DESC mode */
	dma_cap->enh_desc = (cap & DMA_HW_FEAT_ENHDESSEL) >> 24;

	return 0;
}
265*4882a593Smuzhiyun
/* Program the RX interrupt watchdog timer count (riwt).  number_chan
 * is unused here: this core exposes a single watchdog register.
 */
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
				  u32 number_chan)
{
	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun const struct stmmac_dma_ops dwmac1000_dma_ops = {
273*4882a593Smuzhiyun .reset = dwmac_dma_reset,
274*4882a593Smuzhiyun .init = dwmac1000_dma_init,
275*4882a593Smuzhiyun .init_rx_chan = dwmac1000_dma_init_rx,
276*4882a593Smuzhiyun .init_tx_chan = dwmac1000_dma_init_tx,
277*4882a593Smuzhiyun .axi = dwmac1000_dma_axi,
278*4882a593Smuzhiyun .dump_regs = dwmac1000_dump_dma_regs,
279*4882a593Smuzhiyun .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
280*4882a593Smuzhiyun .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
281*4882a593Smuzhiyun .enable_dma_transmission = dwmac_enable_dma_transmission,
282*4882a593Smuzhiyun .enable_dma_irq = dwmac_enable_dma_irq,
283*4882a593Smuzhiyun .disable_dma_irq = dwmac_disable_dma_irq,
284*4882a593Smuzhiyun .start_tx = dwmac_dma_start_tx,
285*4882a593Smuzhiyun .stop_tx = dwmac_dma_stop_tx,
286*4882a593Smuzhiyun .start_rx = dwmac_dma_start_rx,
287*4882a593Smuzhiyun .stop_rx = dwmac_dma_stop_rx,
288*4882a593Smuzhiyun .dma_interrupt = dwmac_dma_interrupt,
289*4882a593Smuzhiyun .get_hw_feature = dwmac1000_get_hw_feature,
290*4882a593Smuzhiyun .rx_watchdog = dwmac1000_rx_watchdog,
291*4882a593Smuzhiyun };
292