xref: /OK3568_Linux_fs/kernel/drivers/dma/mpc512x_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *     refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not have
 *     external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *     MPC512x), and 32 bytes are supported, and, consequently, source
 *     addresses and destination addresses must be aligned accordingly;
 *     furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *     (chunk size * maxburst)
 */
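
/*
 * Illustrative usage sketch (not part of this driver): a slave client drives
 * this controller through the generic dmaengine API. The channel name and
 * device used below are hypothetical placeholders.
 *
 *	chan = dma_request_chan(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);		// see constraints above
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);			// a no-op here, see below
 */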

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
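
/*
 * Worked example (illustrative only): for a 64-byte memory-to-memory copy
 * with 32-byte aligned source and destination, mpc_dma_prep_memcpy() below
 * fills a TCD roughly as follows; a single major iteration moves the whole
 * "nbytes", so biter/citer stay at 1:
 *
 *	tcd->saddr  = src;			// 32-byte aligned
 *	tcd->daddr  = dst;			// 32-byte aligned
 *	tcd->ssize  = MPC_DMA_TSIZE_32;		// 32-byte transfer chunks
 *	tcd->dsize  = MPC_DMA_TSIZE_32;
 *	tcd->soff   = 32;			// address advance per chunk
 *	tcd->doff   = 32;
 *	tcd->nbytes = 64;			// whole length in the minor loop
 *	tcd->biter  = 1;
 *	tcd->citer  = 1;
 */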

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
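
/*
 * Locking/ordering sketch for callers (illustration only): both submission
 * and interrupt handling below call mpc_dma_execute() under mchan->lock and
 * only while the 'active' list is empty and 'queued' is not, e.g.:
 *
 *	spin_lock_irqsave(&mchan->lock, flags);
 *	list_move_tail(&mdesc->node, &mchan->queued);
 *	if (list_empty(&mchan->active))
 *		mpc_dma_execute(mchan);
 *	spin_unlock_irqrestore(&mchan->lock, flags);
 */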

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(struct tasklet_struct *t)
{
	struct mpc_dma *mdma = from_tasklet(mdma, t, tasklet);
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
						      MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}
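
/*
 * Mapping sketch (follows from the MPC_DMA_TSIZE_* codes above); the loop is
 * effectively a log2, e.g.:
 *
 *	buswidth_to_dmatsize(1)  == 0	// MPC_DMA_TSIZE_1
 *	buswidth_to_dmatsize(4)  == 2	// MPC_DMA_TSIZE_4
 *	buswidth_to_dmatsize(16) == 4	// MPC_DMA_TSIZE_16
 *	buswidth_to_dmatsize(32) == 5	// MPC_DMA_TSIZE_32
 */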

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		fallthrough;
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *     supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *     are supported, and, consequently, source addresses and
	 *     destination addresses must be aligned accordingly; furthermore,
	 *     for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *     size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *     of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
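
/*
 * Example configuration (illustrative; the FIFO address is a hypothetical
 * placeholder): an MPC512x RX channel satisfying the constraints above,
 * normally installed via dmaengine_slave_config():
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= 0x80003004,	// constant, 4-byte aligned FIFO
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,		// 16 chunks per hardware request
 *	};
 *
 * Transfer lengths then have to be a multiple of chunk size * maxburst
 * (64 bytes in this example) on MPC512x.
 */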

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_setup(&mdma->tasklet, mpc_dma_tasklet);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
						MPC_DMA_DMACR_ERGA |
						MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");