// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
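
/*
 * Rough descriptor life cycle, as implemented below: descriptors start on
 * ld_free as DESC_IDLE, become DESC_PREPARED in shdma_add_desc(), move to
 * ld_queue as DESC_SUBMITTED in shdma_tx_submit(), are marked DESC_COMPLETED
 * by the threaded IRQ handler, transition to DESC_WAITING in __ld_cleanup()
 * until the client acks them, and finally return to ld_free as DESC_IDLE.
 */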

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, using the .private pointer.
 * You always have to pass a valid slave ID as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that we can remove the slave_id
 * field from dma_slave_config. If this filter is used, the slave driver, after
 * calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or .dst_addr
 * set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using this
 * service, would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
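/*
 * Illustrative client-side sketch of the flow described above (not compiled
 * here); slave_id and fifo_phys_addr are made-up placeholders that would come
 * from the slave device's platform data or DT:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)slave_id);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */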
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on failure
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM, and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
				sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			__func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * Overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	 */
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie in the queue, it has been aborted and
	 * we have to report an error.
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
			   struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
			&sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now; they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Fields compulsory for DMA_SLAVE */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
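
/*
 * Minimal sketch of how a controller ("glue") driver is expected to use this
 * library, based on the helpers exported above. Names such as my_shdma_ops,
 * my_desc, my_chans, pdev and the IRQ/channel numbers are placeholders, not
 * part of this API:
 *
 *	sdev->ops = &my_shdma_ops;		// struct shdma_ops callbacks
 *	sdev->desc_size = sizeof(struct my_desc);
 *	err = shdma_init(&pdev->dev, sdev, n_channels);
 *	if (err)
 *		return err;
 *
 *	for (i = 0; i < n_channels; i++) {
 *		shdma_chan_probe(sdev, &my_chans[i].shdma_chan, i);
 *		err = shdma_request_irq(&my_chans[i].shdma_chan, irqs[i],
 *					IRQF_SHARED, dev_name(&pdev->dev));
 *		if (err < 0)
 *			goto err_cleanup;
 *	}
 *
 *	err = dma_async_device_register(&sdev->dma_dev);
 *	// on teardown: dma_async_device_unregister(), shdma_chan_remove()
 *	// per channel, then shdma_cleanup(sdev)
 */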

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
				   sizeof(long),
				   GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");