/*
 * BRIEF MODULE DESCRIPTION
 *	Defines for using and allocating DMA channels on the Alchemy
 *	Au1x00 MIPS processors.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#ifndef __ASM_AU1000_DMA_H
#define __ASM_AU1000_DMA_H

#include <linux/io.h>		/* need byte IO */
#include <linux/spinlock.h>	/* and spinlocks */
#include <linux/delay.h>

#define NUM_AU1000_DMA_CHANNELS 8

/* DMA Channel Register Offsets */
#define DMA_MODE_SET		0x00000000
#define DMA_MODE_READ		DMA_MODE_SET
#define DMA_MODE_CLEAR		0x00000004
/* DMA Mode register bits follow */
#define DMA_DAH_MASK		(0x0f << 20)
#define DMA_DID_BIT		16
#define DMA_DID_MASK		(0x0f << DMA_DID_BIT)
#define DMA_DS			(1 << 15)
#define DMA_BE			(1 << 13)
#define DMA_DR			(1 << 12)
#define DMA_TS8			(1 << 11)
#define DMA_DW_BIT		9
#define DMA_DW_MASK		(0x03 << DMA_DW_BIT)
#define DMA_DW8			(0 << DMA_DW_BIT)
#define DMA_DW16		(1 << DMA_DW_BIT)
#define DMA_DW32		(2 << DMA_DW_BIT)
#define DMA_NC			(1 << 8)
#define DMA_IE			(1 << 7)
#define DMA_HALT		(1 << 6)
#define DMA_GO			(1 << 5)
#define DMA_AB			(1 << 4)
#define DMA_D1			(1 << 3)
#define DMA_BE1			(1 << 2)
#define DMA_D0			(1 << 1)
#define DMA_BE0			(1 << 0)

#define DMA_PERIPHERAL_ADDR	0x00000008
#define DMA_BUFFER0_START	0x0000000C
#define DMA_BUFFER1_START	0x00000014
#define DMA_BUFFER0_COUNT	0x00000010
#define DMA_BUFFER1_COUNT	0x00000018
#define DMA_BAH_BIT	16
#define DMA_BAH_MASK	(0x0f << DMA_BAH_BIT)
#define DMA_COUNT_BIT	0
#define DMA_COUNT_MASK	(0xffff << DMA_COUNT_BIT)

/* DMA Device IDs follow */
enum {
	DMA_ID_UART0_TX = 0,
	DMA_ID_UART0_RX,
	DMA_ID_GP04,
	DMA_ID_GP05,
	DMA_ID_AC97C_TX,
	DMA_ID_AC97C_RX,
	DMA_ID_UART3_TX,
	DMA_ID_UART3_RX,
	DMA_ID_USBDEV_EP0_RX,
	DMA_ID_USBDEV_EP0_TX,
	DMA_ID_USBDEV_EP2_TX,
	DMA_ID_USBDEV_EP3_TX,
	DMA_ID_USBDEV_EP4_RX,
	DMA_ID_USBDEV_EP5_RX,
	DMA_ID_I2S_TX,
	DMA_ID_I2S_RX,
	DMA_NUM_DEV
};

/* DMA Device IDs for 2nd bank (Au1100) follow */
enum {
	DMA_ID_SD0_TX = 0,
	DMA_ID_SD0_RX,
	DMA_ID_SD1_TX,
	DMA_ID_SD1_RX,
	DMA_NUM_DEV_BANK2
};
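
/*
 * Bank 2 device IDs are selected by setting DMA_DS in the channel mode
 * word; see set_dma_fifo_addr() below, which tests that bit.
 */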

struct dma_chan {
	int dev_id;		/* this channel is allocated if >= 0, */
				/* free otherwise */
	void __iomem *io;
	const char *dev_str;
	int irq;
	void *irq_dev;
	unsigned int fifo_addr;
	unsigned int mode;
};

/* These are in arch/mips/alchemy/common/dma.c */
extern struct dma_chan au1000_dma_table[];
extern int request_au1000_dma(int dev_id,
			      const char *dev_str,
			      irq_handler_t irqhandler,
			      unsigned long irqflags,
			      void *irq_dev_id);
extern void free_au1000_dma(unsigned int dmanr);
extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
				int length, int *eof, void *data);
extern void dump_au1000_dma_channel(unsigned int dmanr);
extern spinlock_t au1000_dma_spin_lock;
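
/*
 * Illustrative sketch (not part of the original header) of claiming and
 * releasing a channel with this API. The handler my_dma_irq, the device
 * string, and my_dev are hypothetical; per the implementation in dma.c,
 * request_au1000_dma() returns the allocated channel number on success
 * or a negative errno on failure.
 *
 *	static irqreturn_t my_dma_irq(int irq, void *irq_dev)
 *	{
 *		// service the buffer-done bits here
 *		return IRQ_HANDLED;
 *	}
 *
 *	int ch = request_au1000_dma(DMA_ID_UART0_TX, "uart0 tx",
 *				    my_dma_irq, 0, my_dev);
 *	if (ch < 0)
 *		return ch;
 *	...
 *	free_au1000_dma(ch);
 */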

static inline struct dma_chan *get_dma_chan(unsigned int dmanr)
{
	if (dmanr >= NUM_AU1000_DMA_CHANNELS ||
	    au1000_dma_table[dmanr].dev_id < 0)
		return NULL;
	return &au1000_dma_table[dmanr];
}

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&au1000_dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&au1000_dma_spin_lock, flags);
}
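
/*
 * Sketch of the intended locking pattern (mirroring the classic ISA
 * DMA API): hold the lock across any multi-register channel update so
 * an interrupt handler cannot interleave. ch, buf and len are
 * hypothetical.
 *
 *	unsigned long flags = claim_dma_lock();
 *
 *	set_dma_addr0(ch, buf);
 *	set_dma_count0(ch, len);
 *	release_dma_lock(flags);
 */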

/*
 * Set the DMA buffer enable bits in the mode register.
 */
static inline void enable_dma_buffer0(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_BE0, chan->io + DMA_MODE_SET);
}

static inline void enable_dma_buffer1(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_BE1, chan->io + DMA_MODE_SET);
}

static inline void enable_dma_buffers(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET);
}

static inline void start_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_GO, chan->io + DMA_MODE_SET);
}

#define DMA_HALT_POLL 0x5000

static inline void halt_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	int i;

	if (!chan)
		return;
	__raw_writel(DMA_GO, chan->io + DMA_MODE_CLEAR);

	/* Poll the halt bit */
	for (i = 0; i < DMA_HALT_POLL; i++)
		if (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT)
			break;
	if (i == DMA_HALT_POLL)
		printk(KERN_INFO "halt_dma: HALT poll expired!\n");
}

static inline void disable_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;

	halt_dma(dmanr);

	/* Now we can disable the buffers */
	__raw_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR);
}

static inline int dma_halted(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 1;
	return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0;
}

/* Initialize a DMA channel. */
static inline void init_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	u32 mode;

	if (!chan)
		return;

	disable_dma(dmanr);

	/* Set device FIFO address */
	__raw_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR);

	mode = chan->mode | (chan->dev_id << DMA_DID_BIT);
	if (chan->irq)
		mode |= DMA_IE;

	__raw_writel(~mode, chan->io + DMA_MODE_CLEAR);
	__raw_writel(mode, chan->io + DMA_MODE_SET);
}
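
/*
 * A minimal bring-up sketch, assuming a channel ch obtained from
 * request_au1000_dma() and a DMA-able buffer at physical address buf
 * (both hypothetical). set_dma_mode() must precede init_dma(), since
 * init_dma() programs the hardware from chan->mode. Note the count
 * registers hold device-width units, not bytes; get_dma_residue()
 * below converts back to bytes.
 *
 *	set_dma_mode(ch, DMA_DW16 | DMA_NC);	(16-bit width + coherency setting)
 *	init_dma(ch);				(programs FIFO address and mode)
 *	set_dma_addr0(ch, buf);
 *	set_dma_count0(ch, len);
 *	enable_dma_buffer0(ch);
 *	start_dma(ch);
 */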

/*
 * Set mode for a specific DMA channel.
 */
static inline void set_dma_mode(unsigned int dmanr, unsigned int mode)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	/*
	 * set_dma_mode is only allowed to change endianness, direction,
	 * transfer size, device FIFO width, and coherency settings.
	 * Make sure anything else is masked off.
	 */
	mode &= (DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
	chan->mode &= ~(DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
	chan->mode |= mode;
}

static inline unsigned int get_dma_mode(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;
	return chan->mode;
}

static inline int get_dma_active_buffer(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return -1;
	return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0;
}

/*
 * Set the device FIFO address for a specific DMA channel - only
 * applicable to GP04 and GP05. All the other devices have fixed
 * FIFO addresses.
 */
static inline void set_dma_fifo_addr(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;

	if (chan->mode & DMA_DS)	/* second bank of device IDs */
		return;

	if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05)
		return;

	__raw_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR);
}

/*
 * Clear the DMA buffer done bits in the mode register.
 */
static inline void clear_dma_done0(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_D0, chan->io + DMA_MODE_CLEAR);
}

static inline void clear_dma_done1(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(DMA_D1, chan->io + DMA_MODE_CLEAR);
}

/*
 * This does nothing - not applicable to Au1000 DMA.
 */
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
}

/*
 * Set Buffer 0 transfer address for specific DMA channel.
 */
static inline void set_dma_addr0(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(a, chan->io + DMA_BUFFER0_START);
}

/*
 * Set Buffer 1 transfer address for specific DMA channel.
 */
static inline void set_dma_addr1(unsigned int dmanr, unsigned int a)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	__raw_writel(a, chan->io + DMA_BUFFER1_START);
}

/*
 * Set Buffer 0 transfer size (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count0(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	__raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
}

/*
 * Set Buffer 1 transfer size (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count1(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	__raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
}

/*
 * Set both buffer transfer sizes (max 64k) for a specific DMA channel.
 */
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return;
	count &= DMA_COUNT_MASK;
	__raw_writel(count, chan->io + DMA_BUFFER0_COUNT);
	__raw_writel(count, chan->io + DMA_BUFFER1_COUNT);
}

/*
 * Returns the buffer done bits (DMA_D0 and/or DMA_D1) from the mode
 * register; 0 if neither buffer is done or the channel is invalid.
 */
static inline unsigned int get_dma_buffer_done(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;
	return __raw_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1);
}
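
/*
 * Sketch of servicing the double-buffer ("ping-pong") engine from an
 * interrupt handler: refill whichever buffer just completed, clear its
 * done bit, then hand it back. ch, next_buf and len are hypothetical.
 *
 *	u32 done = get_dma_buffer_done(ch);
 *
 *	if (done & DMA_D0) {
 *		clear_dma_done0(ch);
 *		set_dma_addr0(ch, next_buf);
 *		set_dma_count0(ch, len);
 *		enable_dma_buffer0(ch);
 *	}
 *	if (done & DMA_D1) {
 *		clear_dma_done1(ch);
 *		set_dma_addr1(ch, next_buf);
 *		set_dma_count1(ch, len);
 *		enable_dma_buffer1(ch);
 *	}
 */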

/*
 * Returns the DMA channel's Buffer Done IRQ number.
 */
static inline int get_dma_done_irq(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return -1;
	return chan->irq;
}

/*
 * Get DMA residue count. Returns the number of _bytes_ left to transfer.
 */
static inline int get_dma_residue(unsigned int dmanr)
{
	int curBufCntReg, count;
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan)
		return 0;

	curBufCntReg = (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ?
	    DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT;

	count = __raw_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK;

	if ((chan->mode & DMA_DW_MASK) == DMA_DW16)
		count <<= 1;
	else if ((chan->mode & DMA_DW_MASK) == DMA_DW32)
		count <<= 2;

	return count;
}
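
/*
 * Example (hypothetical ch and len_bytes): bytes already moved out of
 * the active buffer.
 *
 *	int done_bytes = len_bytes - get_dma_residue(ch);
 */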

#endif /* __ASM_AU1000_DMA_H */