xref: /OK3568_Linux_fs/kernel/arch/arm/mach-rpc/dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  linux/arch/arm/mach-rpc/dma.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C) 1998 Russell King
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  *  DMA functions specific to RiscPC architecture
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun #include <linux/mman.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/interrupt.h>
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <asm/page.h>
16*4882a593Smuzhiyun #include <asm/dma.h>
17*4882a593Smuzhiyun #include <asm/fiq.h>
18*4882a593Smuzhiyun #include <asm/irq.h>
19*4882a593Smuzhiyun #include <mach/hardware.h>
20*4882a593Smuzhiyun #include <linux/uaccess.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include <asm/mach/dma.h>
23*4882a593Smuzhiyun #include <asm/hardware/iomd.h>
24*4882a593Smuzhiyun 
/*
 * Per-channel state for one IOMD DMA channel.  The IOMD double-buffers
 * transfers (buffer A / buffer B register pairs), so we track both the
 * block currently programmed and the remainder of the scatterlist entry.
 */
struct iomd_dma {
	struct dma_struct	dma;		/* generic ARM dma_t; must be first (container_of) */
	void __iomem		*base;		/* Controller base address */
	int			irq;		/* Controller IRQ */
	unsigned int		state;		/* last A/B buffer state seen by the IRQ handler */
	dma_addr_t		cur_addr;	/* bus address programmed into the current buffer */
	unsigned int		cur_len;	/* current block length, with DMA_END_* flags ORed in */
	dma_addr_t		dma_addr;	/* bus address of the rest of the current sg entry */
	unsigned int		dma_len;	/* bytes remaining in the current sg entry */
};
35*4882a593Smuzhiyun 
/* Historical reference: IOMD transfer-size encodings (kept for documentation). */
#if 0
typedef enum {
	dma_size_8	= 1,
	dma_size_16	= 2,
	dma_size_32	= 4,
	dma_size_128	= 16
} dma_size_t;
#endif

/* Transfer size used for all channels (2 == 16-bit, per the table above). */
#define TRANSFER_SIZE	2

/* Register offsets relative to a channel's CURA register (iomd_dma::base). */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
53*4882a593Smuzhiyun 
iomd_get_next_sg(struct iomd_dma * idma)54*4882a593Smuzhiyun static void iomd_get_next_sg(struct iomd_dma *idma)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	unsigned long end, offset, flags = 0;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	if (idma->dma.sg) {
59*4882a593Smuzhiyun 		idma->cur_addr = idma->dma_addr;
60*4882a593Smuzhiyun 		offset = idma->cur_addr & ~PAGE_MASK;
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 		end = offset + idma->dma_len;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 		if (end > PAGE_SIZE)
65*4882a593Smuzhiyun 			end = PAGE_SIZE;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 		if (offset + TRANSFER_SIZE >= end)
68*4882a593Smuzhiyun 			flags |= DMA_END_L;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 		idma->cur_len = end - TRANSFER_SIZE;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 		idma->dma_len -= end - offset;
73*4882a593Smuzhiyun 		idma->dma_addr += end - offset;
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 		if (idma->dma_len == 0) {
76*4882a593Smuzhiyun 			if (idma->dma.sgcount > 1) {
77*4882a593Smuzhiyun 				idma->dma.sg = sg_next(idma->dma.sg);
78*4882a593Smuzhiyun 				idma->dma_addr = idma->dma.sg->dma_address;
79*4882a593Smuzhiyun 				idma->dma_len = idma->dma.sg->length;
80*4882a593Smuzhiyun 				idma->dma.sgcount--;
81*4882a593Smuzhiyun 			} else {
82*4882a593Smuzhiyun 				idma->dma.sg = NULL;
83*4882a593Smuzhiyun 				flags |= DMA_END_S;
84*4882a593Smuzhiyun 			}
85*4882a593Smuzhiyun 		}
86*4882a593Smuzhiyun 	} else {
87*4882a593Smuzhiyun 		flags = DMA_END_S | DMA_END_L;
88*4882a593Smuzhiyun 		idma->cur_addr = 0;
89*4882a593Smuzhiyun 		idma->cur_len = 0;
90*4882a593Smuzhiyun 	}
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	idma->cur_len |= flags;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
/*
 * IOMD DMA interrupt handler.  Each time a buffer (A or B) completes,
 * refill the just-finished buffer register pair with the next block so
 * the other buffer can keep streaming.  When the transfer is complete,
 * mark the channel idle and mask its interrupt.
 */
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
	struct iomd_dma *idma = dev_id;
	void __iomem *base = idma->base;
	unsigned int state = idma->state;
	unsigned int status, cur, end;

	do {
		status = readb(base + ST);
		if (!(status & DMA_ST_INT))
			/* No interrupt pending: keep the current state and leave. */
			goto out;

		/* The active buffer flipped since we last looked: fetch next block. */
		if ((state ^ status) & DMA_ST_AB)
			iomd_get_next_sg(idma);

		// This efficiently implements state = OFL != AB ? AB : 0
		state = ((status >> 2) ^ status) & DMA_ST_AB;
		if (state) {
			cur = CURA;
			end = ENDA;
		} else {
			cur = CURB;
			end = ENDB;
		}
		/* Program the idle buffer pair; cur_len carries DMA_END_* flags. */
		writel(idma->cur_addr, base + cur);
		writel(idma->cur_len, base + end);

		/*
		 * Overflow with an empty terminal block means the whole
		 * transfer has completed.
		 */
		if (status & DMA_ST_OFL &&
		    idma->cur_len == (DMA_END_S|DMA_END_L))
			break;
	} while (1);

	/* ~DMA_ST_AB is the sentinel "channel idle" state (see iomd_disable_dma). */
	state = ~DMA_ST_AB;
	disable_irq_nosync(irq);
out:
	idma->state = state;
	return IRQ_HANDLED;
}
133*4882a593Smuzhiyun 
/*
 * dma_ops->request: claim the channel's interrupt line.
 * Returns 0 on success or the request_irq() error code.
 */
static int iomd_request_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma;

	idma = container_of(dma, struct iomd_dma, dma);

	return request_irq(idma->irq, iomd_dma_handle, 0,
			   idma->dma.device_id, idma);
}
141*4882a593Smuzhiyun 
/* dma_ops->free: release the interrupt claimed by iomd_request_dma(). */
static void iomd_free_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma;

	idma = container_of(dma, struct iomd_dma, dma);
	free_irq(idma->irq, idma);
}
148*4882a593Smuzhiyun 
/*
 * Fallback struct device used by iomd_enable_dma() when an ISA-style
 * driver supplies a plain virtual address instead of a scatterlist,
 * so dma_map_single() has a device to map against.  The all-ones
 * masks accept any DMA address.
 */
static struct device isa_dma_dev = {
	.init_name		= "fallback device",
	.coherent_dma_mask	= ~(dma_addr_t)0,
	.dma_mask		= &isa_dma_dev.coherent_dma_mask,
};
154*4882a593Smuzhiyun 
/*
 * dma_ops->enable: start (or restart) a transfer on an IOMD channel.
 * If the transfer parameters changed (dma.invalid), reset the controller
 * and load the first scatterlist entry; then set direction, enable the
 * channel, and unmask its interrupt so iomd_dma_handle() takes over.
 */
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

	if (idma->dma.invalid) {
		idma->dma.invalid = 0;

		/*
		 * Cope with ISA-style drivers which expect cache
		 * coherence.
		 */
		if (!idma->dma.sg) {
			/* Build a one-entry scatterlist around addr/count. */
			idma->dma.sg = &idma->dma.buf;
			idma->dma.sgcount = 1;
			idma->dma.buf.length = idma->dma.count;
			idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
				idma->dma.addr, idma->dma.count,
				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		idma->dma_addr = idma->dma.sg->dma_address;
		idma->dma_len = idma->dma.sg->length;

		/* Clear/reset the channel before programming it. */
		writeb(DMA_CR_C, base + CR);
		idma->state = DMA_ST_AB;
	}

	/* DMA_CR_D selects device->memory direction for reads. */
	if (idma->dma.dma_mode == DMA_MODE_READ)
		ctrl |= DMA_CR_D;

	writeb(ctrl, base + CR);
	enable_irq(idma->irq);
}
191*4882a593Smuzhiyun 
/*
 * dma_ops->disable: stop a channel.  Only mask the IRQ if the handler
 * has not already done so itself (it leaves state == ~DMA_ST_AB when
 * it calls disable_irq_nosync() on completion), keeping the enable/
 * disable depth balanced.  Done with IRQs off so the handler cannot
 * race the state check.
 */
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	void __iomem *base = idma->base;
	unsigned long flags;

	local_irq_save(flags);
	if (idma->state != ~DMA_ST_AB)
		disable_irq(idma->irq);
	writeb(0, base + CR);
	local_irq_restore(flags);
}
204*4882a593Smuzhiyun 
/*
 * dma_ops->setspeed: map a requested cycle time to one of the four
 * IOMD speed encodings (0 = slowest, 3 = fastest) and program it into
 * this channel's 2-bit field of IOMD_DMATCR.  Returns the encoding
 * actually programmed.
 */
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
	int tcr, speed;

	if (cycle < 188)
		speed = 3;
	else if (cycle <= 250)
		speed = 2;
	else if (cycle < 438)
		speed = 1;
	else
		speed = 0;
	speed &= 3;

	/* Read-modify-write only this channel's 2-bit field. */
	tcr = iomd_readb(IOMD_DMATCR);

	switch (chan) {
	case DMA_0:
		tcr = (tcr & ~(3 << 0)) | (speed << 0);
		break;

	case DMA_1:
		tcr = (tcr & ~(3 << 2)) | (speed << 2);
		break;

	case DMA_2:
		tcr = (tcr & ~(3 << 4)) | (speed << 4);
		break;

	case DMA_3:
		tcr = (tcr & ~(3 << 6)) | (speed << 6);
		break;

	default:
		/* Unknown channel: leave the register unchanged. */
		break;
	}

	iomd_writeb(tcr, IOMD_DMATCR);

	return speed;
}
246*4882a593Smuzhiyun 
/* Operations for the six hardware IOMD DMA channels. */
static struct dma_ops iomd_dma_ops = {
	.type		= "IOMD",
	.request	= iomd_request_dma,
	.free		= iomd_free_dma,
	.enable		= iomd_enable_dma,
	.disable	= iomd_disable_dma,
	.setspeed	= iomd_set_dma_speed,
};
255*4882a593Smuzhiyun 
/* FIQ handler slot claimed while a floppy transfer is in progress. */
static struct fiq_handler fh = {
	.name	= "floppydma"
};

/* Pseudo-DMA floppy channel: data is moved by a FIQ handler, not IOMD DMA. */
struct floppy_dma {
	struct dma_struct	dma;	/* generic ARM dma_t; must be first (container_of) */
	unsigned int		fiq;	/* FIQ number to enable for the transfer */
};
264*4882a593Smuzhiyun 
/*
 * dma_ops->enable for the floppy: install the direction-specific FIQ
 * assembly routine (floppy_fiqin/floppy_fiqout, defined elsewhere in
 * asm), preload the FIQ-banked registers it uses, and enable the FIQ.
 * The floppy driver uses addr/count directly; a scatterlist here is a
 * bug.
 */
static void floppy_enable_dma(unsigned int chan, dma_t *dma)
{
	struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs regs;

	if (fdma->dma.sg)
		BUG();

	if (fdma->dma.dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	/*
	 * Register convention expected by the FIQ asm:
	 * r9 = byte count, r10 = buffer address, fp = FDC base.
	 */
	regs.ARM_r9  = fdma->dma.count;
	regs.ARM_r10 = (unsigned long)fdma->dma.addr;
	regs.ARM_fp  = (unsigned long)FLOPPYDMA_BASE;

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	set_fiq_handler(fiqhandler_start, fiqhandler_length);
	set_fiq_regs(&regs);
	enable_fiq(fdma->fiq);
}
298*4882a593Smuzhiyun 
/* dma_ops->disable for the floppy: stop the FIQ and release the handler slot. */
static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
	struct floppy_dma *fdma;

	fdma = container_of(dma, struct floppy_dma, dma);

	disable_fiq(fdma->fiq);
	release_fiq(&fh);
}
305*4882a593Smuzhiyun 
/*
 * dma_ops->residue for the floppy.  r9 was loaded with the byte count
 * in floppy_enable_dma(); the FIQ asm presumably counts it down, so
 * whatever remains in the banked r9 is the residue.
 */
static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
	struct pt_regs fiq_regs;

	get_fiq_regs(&fiq_regs);

	return fiq_regs.ARM_r9;
}
312*4882a593Smuzhiyun 
/* Operations for the FIQ-driven pseudo-DMA floppy channel. */
static struct dma_ops floppy_dma_ops = {
	.type		= "FIQDMA",
	.enable		= floppy_enable_dma,
	.disable	= floppy_disable_dma,
	.residue	= floppy_get_residue,
};
319*4882a593Smuzhiyun 
/*
 * This is virtual DMA - we don't need anything here.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}

/* Operations for the virtual sound channel: enable/disable are no-ops. */
static struct dma_ops sound_dma_ops = {
	.type		= "VIRTUAL",
	.enable		= sound_enable_disable_dma,
	.disable	= sound_enable_disable_dma,
};
332*4882a593Smuzhiyun 
/* The six hardware channels: IO0-IO3 plus the two sound channels S0/S1. */
static struct iomd_dma iomd_dma[6];

/* Floppy pseudo-DMA channel, driven by the floppy data FIQ. */
static struct floppy_dma floppy_dma = {
	.dma		= {
		.d_ops	= &floppy_dma_ops,
	},
	.fiq		= FIQ_FLOPPYDATA,
};

/* Virtual sound channel (no hardware behind it). */
static dma_t sound_dma = {
	.d_ops		= &sound_dma_ops,
};
345*4882a593Smuzhiyun 
/*
 * Initialise RiscPC DMA: quiesce the IOMD channels, set default cycle
 * speeds, route channels, and register every channel (hardware, floppy
 * FIQ, virtual sound) with the ISA DMA core.  Registration failures are
 * logged but not fatal; the remaining channels stay usable.
 */
static int __init rpc_dma_init(void)
{
	unsigned int i;
	int ret;

	/* Disable all four I/O DMA channels before touching anything else. */
	iomd_writeb(0, IOMD_IO0CR);
	iomd_writeb(0, IOMD_IO1CR);
	iomd_writeb(0, IOMD_IO2CR);
	iomd_writeb(0, IOMD_IO3CR);

	/* Default cycle-speed encodings for all channels. */
	iomd_writeb(0xa0, IOMD_DMATCR);

	/*
	 * Setup DMA channels 2,3 to be for podules
	 * and channels 0,1 for internal devices
	 */
	iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);

	/* Per-channel register base (CURA) and interrupt line. */
	iomd_dma[DMA_0].base	= IOMD_BASE + IOMD_IO0CURA;
	iomd_dma[DMA_0].irq	= IRQ_DMA0;
	iomd_dma[DMA_1].base	= IOMD_BASE + IOMD_IO1CURA;
	iomd_dma[DMA_1].irq	= IRQ_DMA1;
	iomd_dma[DMA_2].base	= IOMD_BASE + IOMD_IO2CURA;
	iomd_dma[DMA_2].irq	= IRQ_DMA2;
	iomd_dma[DMA_3].base	= IOMD_BASE + IOMD_IO3CURA;
	iomd_dma[DMA_3].irq	= IRQ_DMA3;
	iomd_dma[DMA_S0].base	= IOMD_BASE + IOMD_SD0CURA;
	iomd_dma[DMA_S0].irq	= IRQ_DMAS0;
	iomd_dma[DMA_S1].base	= IOMD_BASE + IOMD_SD1CURA;
	iomd_dma[DMA_S1].irq	= IRQ_DMAS1;

	for (i = DMA_0; i <= DMA_S1; i++) {
		iomd_dma[i].dma.d_ops = &iomd_dma_ops;

		ret = isa_dma_add(i, &iomd_dma[i].dma);
		if (ret)
			printk("IOMDDMA%u: unable to register: %d\n", i, ret);
	}

	ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
	if (ret)
		printk("IOMDFLOPPY: unable to register: %d\n", ret);
	ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
	if (ret)
		printk("IOMDSOUND: unable to register: %d\n", ret);
	return 0;
}
core_initcall(rpc_dma_init);
394