// SPDX-License-Identifier: GPL-2.0
/*
 * MIPS Jazz DMA controller support
 * Copyright (C) 1995, 1996 by Andreas Busse
 *
 * NOTE: Some of the argument checking could be removed when
 * things have settled down. Also, instead of returning
 * DMA_MAPPING_ERROR on failure of vdma_alloc() one could leave
 * page #0 unused and return the more usual NULL pointer as the
 * logical address.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/dma-map-ops.h>
#include <linux/uaccess.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>

/*
 * Set this to one to enable additional vdma debug code.
 */
#define CONF_DEBUG_VDMA 0

static VDMA_PGTBL_ENTRY *pgtbl;

static DEFINE_SPINLOCK(vdma_lock);

/*
 * Debug stuff
 */
#define vdma_debug     ((CONF_DEBUG_VDMA) ? debuglvl : 0)

static int debuglvl = 3;

/*
 * Initialize the pagetable with a one-to-one mapping of
 * the first 16 Mbytes of main memory and declare all
 * entries to be unused. Using this method will at least
 * allow some early device driver operations to work.
 */
static inline void vdma_pgtbl_init(void)
{
	unsigned long paddr = 0;
	int i;

	for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
		pgtbl[i].frame = paddr;
		pgtbl[i].owner = VDMA_PAGE_EMPTY;
		paddr += VDMA_PAGESIZE;
	}
}

/*
 * Initialize the Jazz R4030 dma controller
 */
static int __init vdma_init(void)
{
	/*
	 * Allocate 32k of memory for DMA page tables.  This needs to be page
	 * aligned and should be uncached to avoid cache flushing after every
	 * update.
	 */
	pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
						    get_order(VDMA_PGTBL_SIZE));
	BUG_ON(!pgtbl);
	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

	/*
	 * Clear the R4030 translation table
	 */
	vdma_pgtbl_init();

	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
			  CPHYSADDR((unsigned long)pgtbl));
	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
	return 0;
}
arch_initcall(vdma_init);

/*
 * Allocate DMA pagetables using a simple first-fit algorithm
 */
unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
{
	int first, last, pages, frame, i;
	unsigned long laddr, flags;

	/* check arguments */

	if (paddr > 0x1fffffff) {
		if (vdma_debug)
			printk("vdma_alloc: Invalid physical address: %08lx\n",
			       paddr);
		return DMA_MAPPING_ERROR;	/* invalid physical address */
	}
	if (size > 0x400000 || size == 0) {
		if (vdma_debug)
			printk("vdma_alloc: Invalid size: %08lx\n", size);
		return DMA_MAPPING_ERROR;	/* invalid size */
	}

	spin_lock_irqsave(&vdma_lock, flags);
	/*
	 * Find free chunk
	 */
	pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
	first = 0;
	while (1) {
		/* bounds check must come first to avoid reading past the table */
		while (first < VDMA_PGTBL_ENTRIES &&
		       pgtbl[first].owner != VDMA_PAGE_EMPTY)
			first++;
		if (first + pages > VDMA_PGTBL_ENTRIES) {	/* nothing free */
			spin_unlock_irqrestore(&vdma_lock, flags);
			return DMA_MAPPING_ERROR;
		}

		last = first + 1;
		while (pgtbl[last].owner == VDMA_PAGE_EMPTY
		       && last - first < pages)
			last++;

		if (last - first == pages)
			break;	/* found */
		first = last + 1;
	}

	/*
	 * Mark pages as allocated
	 */
	laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
	frame = paddr & ~(VDMA_PAGESIZE - 1);

	for (i = first; i < last; i++) {
		pgtbl[i].frame = frame;
		pgtbl[i].owner = laddr;
		frame += VDMA_PAGESIZE;
	}

	/*
	 * Update translation table and return logical start address
	 */
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	if (vdma_debug > 1)
		printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
		       pages, laddr);

	if (vdma_debug > 2) {
		printk("LADDR: ");
		for (i = first; i < last; i++)
			printk("%08x ", i << 12);
		printk("\nPADDR: ");
		for (i = first; i < last; i++)
			printk("%08x ", pgtbl[i].frame);
		printk("\nOWNER: ");
		for (i = first; i < last; i++)
			printk("%08x ", pgtbl[i].owner);
		printk("\n");
	}

	spin_unlock_irqrestore(&vdma_lock, flags);

	return laddr;
}

EXPORT_SYMBOL(vdma_alloc);

/*
 * Free previously allocated DMA translation pages.
 * Note that this does NOT change the translation table,
 * it just marks the freed pages as unused!
 */
int vdma_free(unsigned long laddr)
{
	int i;

	i = laddr >> 12;

	if (pgtbl[i].owner != laddr) {
		printk("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
		       laddr);
		return -1;
	}

	while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
		pgtbl[i].owner = VDMA_PAGE_EMPTY;
		i++;
	}

	if (vdma_debug > 1)
		printk("vdma_free: freed %ld pages starting from %08lx\n",
		       i - (laddr >> 12), laddr);

	return 0;
}

EXPORT_SYMBOL(vdma_free);
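
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically pair vdma_alloc()/vdma_free() around a device transfer.
 * The buffer 'buf' and length 'len' below are hypothetical; any
 * physical address handed to vdma_alloc() must lie in the low 512 MB
 * (paddr <= 0x1fffffff) and the mapping must be at most 4 MB long:
 *
 *	unsigned long laddr;
 *
 *	laddr = vdma_alloc(virt_to_phys(buf), len);
 *	if (laddr == DMA_MAPPING_ERROR)
 *		return -ENOMEM;
 *	... program the device with the logical address 'laddr' ...
 *	vdma_free(laddr);
 */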

/*
 * Translate a physical address to a logical address.
 * This will return the logical address of the first
 * match.
 */
unsigned long vdma_phys2log(unsigned long paddr)
{
	int i;
	int frame;

	frame = paddr & ~(VDMA_PAGESIZE - 1);

	for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
		if (pgtbl[i].frame == frame)
			break;
	}

	if (i == VDMA_PGTBL_ENTRIES)
		return ~0UL;

	return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
}

EXPORT_SYMBOL(vdma_phys2log);

/*
 * Translate a logical DMA address to a physical address
 */
unsigned long vdma_log2phys(unsigned long laddr)
{
	return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
}

EXPORT_SYMBOL(vdma_log2phys);
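
/*
 * Worked example (illustrative, assuming the usual 4 KB VDMA_PAGESIZE):
 * a logical address splits into a page-table index (bits 31..12) and a
 * page offset (bits 11..0).  For laddr = 0x3420:
 *
 *	index  = 0x3420 >> 12    = 3
 *	offset = 0x3420 & 0xfff  = 0x420
 *	paddr  = pgtbl[3].frame + 0x420
 *
 * vdma_phys2log() performs the inverse: it masks the offset off the
 * physical address, scans the table for a matching frame, and rebuilds
 * the logical address as (index << 12) + offset.
 */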

/*
 * Print DMA statistics
 */
void vdma_stats(void)
{
	int i;

	printk("vdma_stats: CONFIG: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_CONFIG));
	printk("R4030 translation table base: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
	printk("R4030 translation table limit: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
	printk("vdma_stats: INV_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_INV_ADDR));
	printk("vdma_stats: R_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
	printk("vdma_stats: M_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
	printk("vdma_stats: IRQ_SOURCE: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
	printk("vdma_stats: I386_ERROR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_I386_ERROR));
	printk("vdma_chnl_modes:   ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (i << 5)));
	printk("\n");
	printk("vdma_chnl_enables: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (i << 5)));
	printk("\n");
}

/*
 * DMA transfer functions
 */

/*
 * Enable a DMA channel. Also clear any error conditions.
 */
void vdma_enable(int channel)
{
	int status;

	if (vdma_debug)
		printk("vdma_enable: channel %d\n", channel);

	/*
	 * Check error conditions first
	 */
	status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
	if (status & 0x400)
		printk("VDMA: Channel %d: Address error!\n", channel);
	if (status & 0x200)
		printk("VDMA: Channel %d: Memory error!\n", channel);

	/*
	 * Clear all interrupt flags
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) | R4030_TC_INTR
			  | R4030_MEM_INTR | R4030_ADDR_INTR);

	/*
	 * Enable the desired channel
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) |
			  R4030_CHNL_ENABLE);
}

EXPORT_SYMBOL(vdma_enable);

/*
 * Disable a DMA channel
 */
void vdma_disable(int channel)
{
	if (vdma_debug) {
		int status =
		    r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
				     (channel << 5));

		printk("vdma_disable: channel %d\n", channel);
		printk("VDMA: channel %d status: %04x (%s) mode: %02x addr: %06x count: %06x\n",
		       channel, status,
		       ((status & 0x600) ? "ERROR" : "OK"),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
						   (channel << 5)));
	}

	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) &
			  ~R4030_CHNL_ENABLE);

	/*
	 * After disabling a DMA channel a remote bus register should be
	 * read to ensure that the current DMA acknowledge cycle is completed.
	 */
	*((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
}

EXPORT_SYMBOL(vdma_disable);

/*
 * Set DMA mode. This function accepts the mode values used
 * to set a PC-style DMA controller. For the SCSI and FDC
 * channels, we also set the default modes each time we're
 * called.
 * NOTE: The FAST and BURST dma modes are supported by the
 * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
 * for now.
 */
void vdma_set_mode(int channel, int mode)
{
	if (vdma_debug)
		printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
		       mode);

	switch (channel) {
	case JAZZ_SCSI_DMA:	/* scsi */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*			  R4030_MODE_FAST | */
/*			  R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_16 |
				  R4030_MODE_ATIME_80);
		break;

	case JAZZ_FLOPPY_DMA:	/* floppy */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*			  R4030_MODE_FAST | */
/*			  R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_8 |
				  R4030_MODE_ATIME_120);
		break;

	case JAZZ_AUDIOL_DMA:
	case JAZZ_AUDIOR_DMA:
		printk("VDMA: Audio DMA not supported yet.\n");
		break;

	default:
		printk("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
		       channel);
	}

	switch (mode) {
	case DMA_MODE_READ:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) &
				  ~R4030_CHNL_WRITE);
		break;

	case DMA_MODE_WRITE:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) |
				  R4030_CHNL_WRITE);
		break;

	default:
		printk("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
		       mode);
	}
}

EXPORT_SYMBOL(vdma_set_mode);

/*
 * Set Transfer Address
 */
void vdma_set_addr(int channel, long addr)
{
	if (vdma_debug)
		printk("vdma_set_addr: channel %d, addr %lx\n", channel,
		       addr);

	r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
}

EXPORT_SYMBOL(vdma_set_addr);

/*
 * Set Transfer Count
 */
void vdma_set_count(int channel, int count)
{
	if (vdma_debug)
		printk("vdma_set_count: channel %d, count %08x\n", channel,
		       (unsigned) count);

	r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
}

EXPORT_SYMBOL(vdma_set_count);

/*
 * Get Residual
 */
int vdma_get_residue(int channel)
{
	int residual;

	residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));

	if (vdma_debug)
		printk("vdma_get_residue: channel %d: residual=%d\n",
		       channel, residual);

	return residual;
}

/*
 * Get DMA channel enable register
 */
int vdma_get_enable(int channel)
{
	int enable;

	enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));

	if (vdma_debug)
		printk("vdma_get_enable: channel %d: enable=%d\n", channel,
		       enable);

	return enable;
}
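
/*
 * Illustrative sketch (not part of the original file): a typical
 * device transfer drives the channel registers in this order.  The
 * names 'laddr' and 'len' are hypothetical; 'laddr' is a logical
 * address previously obtained from vdma_alloc():
 *
 *	vdma_set_mode(JAZZ_FLOPPY_DMA, DMA_MODE_READ);
 *	vdma_set_addr(JAZZ_FLOPPY_DMA, laddr);
 *	vdma_set_count(JAZZ_FLOPPY_DMA, len);
 *	vdma_enable(JAZZ_FLOPPY_DMA);
 *	... wait for the device to signal completion ...
 *	vdma_disable(JAZZ_FLOPPY_DMA);
 *	if (vdma_get_residue(JAZZ_FLOPPY_DMA) != 0)
 *		... transfer came up short ...
 */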

static void *jazz_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	size = PAGE_ALIGN(size);
	page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
	if (*dma_handle == DMA_MAPPING_ERROR)
		goto out_free_pages;
	arch_dma_prep_coherent(page, size);
	return (void *)(UNCAC_BASE + __pa(ret));

out_free_pages:
	__free_pages(page, get_order(size));
	return NULL;
}
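
/*
 * Note on the return value above: the buffer is handed back through an
 * uncached (KSEG1) alias, so the caller sees coherent memory without
 * explicit cache maintenance, while *dma_handle carries the bus-side
 * logical address from vdma_alloc().  Illustrative driver-side usage
 * (names are hypothetical):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, 4096, &bus, GFP_KERNEL);
 *
 * On Jazz this should end up in jazz_dma_alloc() via jazz_dma_ops.
 */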

static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	vdma_free(dma_handle);
	__free_pages(virt_to_page(vaddr), get_order(size));
}

static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return vdma_alloc(phys, size);
}

static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
	vdma_free(dma_addr);
}

static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(sg_phys(sg), sg->length,
				dir);
		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
		vdma_free(sg->dma_address);
	}
}

static void jazz_dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}

static void jazz_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}

static void jazz_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static void jazz_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

const struct dma_map_ops jazz_dma_ops = {
	.alloc			= jazz_dma_alloc,
	.free			= jazz_dma_free,
	.map_page		= jazz_dma_map_page,
	.unmap_page		= jazz_dma_unmap_page,
	.map_sg			= jazz_dma_map_sg,
	.unmap_sg		= jazz_dma_unmap_sg,
	.sync_single_for_cpu	= jazz_dma_sync_single_for_cpu,
	.sync_single_for_device	= jazz_dma_sync_single_for_device,
	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(jazz_dma_ops);
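
/*
 * Illustrative note (not part of the original file): on Jazz machines
 * the generic DMA API is routed to this table; a driver never calls
 * the jazz_dma_* functions directly.  A hypothetical streaming mapping
 * of a buffer 'buf' of 'len' bytes would look like:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * which reaches jazz_dma_map_page()/jazz_dma_unmap_page() through
 * jazz_dma_ops.
 */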