// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals connected
 * to a DMA controller that has a DMAengine driver implementation.
 */
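
/*
 * Data flow for a capture (DMA_DEV_TO_MEM) buffer, in brief: the generic DMA
 * buffer core hands blocks to iio_dmaengine_buffer_submit_block(), which
 * prepares and submits a slave transfer for each block. When a transfer
 * completes, iio_dmaengine_buffer_block_done() removes the block from the
 * active list, subtracts any residue from the number of bytes used and hands
 * the block back to the generic DMA buffer layer.
 */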

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);

	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/* Needs to be aligned to the maximum of the minimums */
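	/*
	 * Worked example with illustrative capability values (not from any
	 * specific controller): src_addr_widths = BIT(4) | BIT(8) and
	 * dst_addr_widths = BIT(2) | BIT(4) give __ffs() results of 4 and 2
	 * bytes, so transfers are aligned to max(4, 2) = 4 bytes.
	 */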
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);
	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
		iio_dmaengine_buffer_attrs);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}

static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
{
	iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
}

/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * The buffer will be automatically de-allocated once the device gets destroyed.
 */
struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct iio_buffer **bufferp, *buffer;

	bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
			       sizeof(*bufferp), GFP_KERNEL);
	if (!bufferp)
		return ERR_PTR(-ENOMEM);

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer)) {
		devres_free(bufferp);
		return buffer;
	}

	*bufferp = buffer;
	devres_add(dev, bufferp);

	return buffer;
}
EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
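
/*
 * Minimal usage sketch, as a rough guide rather than part of this driver:
 * a peripheral driver would typically request the buffer in its probe
 * function and attach it to its IIO device. The "rx" channel name and the
 * surrounding driver context are assumptions, not taken from this file.
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);
 */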

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");