// SPDX-License-Identifier: GPL-2.0-only
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			      struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

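/* sgdma_initialize() below maps the already ioremapped rx/tx descriptor
 * memory (priv->rx_dma_desc / priv->tx_dma_desc) for streaming DMA and
 * zeroes it; the __force casts only strip the __iomem annotation so the
 * pointers can be handed to dma_map_single().
 */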
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
		      SGDMA_CTRLREG_INTEN;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
		      SGDMA_CTRLREG_INTEN |
		      SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device,
					  (void __force *)priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device,
					  (void __force *)priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	/* Initialize descriptor memory to all 0's and sync it to the device */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->rxdescmem, DMA_TO_DEVICE);

	return 0;
}

void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	/* Initialize descriptor memory to 0 */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so no need to provide implementations for abstract enable
 * and disable
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

/* transmits buffer through SGDMA. Returns number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
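/* The tx path uses only the first two descriptors in the tx descriptor
 * memory: descbase[0] describes the frame being transmitted and
 * descbase[1] terminates the chain (sgdma_setup_descrip() clears its
 * hardware-owned bit).
 */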
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* address of packet to xmit */
			    0,				/* write addr 0 for tx dma */
			    buffer->len,		/* length of packet */
			    SGDMA_CONTROL_EOP,		/* Generate EOP */
			    0,				/* read fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}


/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;

	if (!sgdma_txbusy(priv) &&
	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
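/* For illustration only, a caller could unpack the combined value as:
 *
 *	u32 rxstatus  = sgdma_rx_status(priv);
 *	u16 pktlength = rxstatus & 0xffff;	(lower 16 bits)
 *	u16 pktstatus = rxstatus >> 16;		(upper 16 bits)
 */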
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;
		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					SGDMA_DESC_LEN,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* Clear control */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
			/* clear status */
			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* kick the rx sgdma after reaping this descriptor */
			sgdma_async_read(priv);

		} else {
			/* If the SGDMA indicated an end of packet on recv,
			 * then it's expected that the rxstatus from the
			 * descriptor is non-zero - meaning a valid packet
			 * with a nonzero length, or an error has been
			 * indicated. If not, then all we can do is signal
			 * an error and return no packet received. Most likely
			 * there is a system design error, or an error in the
			 * underlying kernel (cache or cache management problem)
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		sgdma_async_read(priv);
	}

	return rxstatus;
}


/* Private functions */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */

	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 (the initial state), restart the async
 * read, most likely for the first time when populating a receive
 * buffer.
 * If the read status indicates not busy and some status bits are set,
 * restart the async DMA read.
 */
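/* The restart sequence below rebuilds descriptor 0 for the buffer at
 * the head of the rx queue, flushes the descriptor to the device, then
 * points the controller's next-descriptor register at it and sets the
 * START bit in the control register.
 */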
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   SGDMA_DESC_LEN,
					   DMA_TO_DEVICE);

		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   SGDMA_DESC_LEN, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}

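/* Translate a descriptor pointer within the ioremapped descriptor
 * memory into the bus address the SGDMA controller expects, by adding
 * the descriptor's byte offset to the descriptor memory's bus base
 * address.
 */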
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

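/* Helpers for the tx/rx pending-buffer lists: list_remove_head() pops
 * the first entry (entry is left NULL if the list is empty), while
 * list_peek_head() returns it without removing it. Typical use, as in
 * dequeue_tx() below:
 *
 *	struct tse_buffer *buffer = NULL;
 *	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
 */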
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of a rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns the tse_buffer at the head of the receive buffer list
 * without removing it, or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* check and return the rx sgdma busy status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
		       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
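/* Note that the wait below is bounded: at most 100 iterations of
 * udelay(1), i.e. roughly 100 microseconds, before giving up and
 * reporting a timeout.
 */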
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}