// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since these are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
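
/*
 * A minimal consumer-side sketch (not part of this driver) of how a client
 * would typically drive such a channel through the generic dmaengine API.
 * The channel name "rx" and the callback are assumptions for illustration:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_callback;	// hypothetical callback
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */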

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
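
/*
 * Illustrative example (a typical configuration, assumed here, not a
 * hardware requirement): with 64-bit buses on both sides the widths are
 * 8 bytes, so address_align_mask is 7 and an address such as 0x1004 fails
 * axi_dmac_check_addr() while 0x1008 passes.
 */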

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Partial segment not found id=%u, len=%u\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
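	/*
	 * The statement above rounds segment_size up to the next multiple of
	 * length_align_mask + 1 (a power of two): ((x - 1) | mask) + 1.
	 * For example, with a mask of 3, 1001 becomes 1004 while 1000 is
	 * already aligned and stays unchanged.
	 */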

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
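/*
 * A sketch of the devicetree layout the parser below expects, inferred from
 * the properties it reads; the node names, unit address and property values
 * are illustrative assumptions, not a normative binding example:
 *
 *	dma-controller@44a30000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		...
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,destination-bus-width = <64>;
 *			};
 *		};
 *	};
 */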
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

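	/*
	 * Capability probing (an inference from the read-back checks below):
	 * registers backing unimplemented features read back as zero or with
	 * unsupported bits masked off, so writing a known value and reading
	 * it back reveals which features this core instance implements.
	 */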
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");