// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"

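/*
 * Fallback 64-bit MMIO helpers for configurations that do not provide a
 * native readq()/writeq(): each 64-bit register access is split into two
 * 32-bit accesses, low word first. Note that the split access is not
 * atomic with respect to the hardware.
 */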
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif

static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}

static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}

static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = PDMA_FULL_SPEED;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}

static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}

static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;
	unsigned long iflags;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Please check dma len, dest, src!\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->in_use = true;
	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	spin_lock_irqsave(&chan->vchan.lock, iflags);
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, iflags);

	return desc->async_tx;
}

static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}

static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}

static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}

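/*
 * Report how many bytes remain for a given cookie: for the descriptor
 * currently on the hardware the residue register is read back, while a
 * descriptor that is still queued reports its full transfer size.
 */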
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx = NULL;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
		if (vd->tx.cookie == cookie)
			tx = &vd->tx;

	if (!tx)
		goto out;

	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}

static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}

static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	v = PDMA_CLAIM_MASK |
	    PDMA_ENABLE_DONE_INT_MASK |
	    PDMA_ENABLE_ERR_INT_MASK |
	    PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}

static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
	struct virt_dma_chan *vchan = &chan->vchan;
	struct virt_dma_desc *vdesc;

	if (list_empty(&vchan->desc_issued))
		return NULL;

	vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);

	return container_of(vdesc, struct sf_pdma_desc, vdesc);
}

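/*
 * Program the channel registers from the current descriptor and start the
 * transfer: the transfer type, size and addresses are written first, then
 * sf_pdma_enable_request() sets the RUN bit to kick off the transfer.
 */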
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->desc = desc;
	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}

static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
		/* vchan_issue_pending() has verified a descriptor is pending, so desc is not NULL */
		chan->desc = sf_pdma_get_first_pending_desc(chan);
		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	desc->in_use = false;
}

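/*
 * Bottom half for the DONE interrupt: clear any previous error state,
 * complete the descriptor that just finished and, if more work has been
 * issued, start the next pending descriptor.
 */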
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);

	chan->desc = sf_pdma_get_first_pending_desc(chan);
	if (chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

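/*
 * Bottom half for the ERROR interrupt: retry the failed transfer up to
 * MAX_RETRY times; once the retries are exhausted, give up and invoke the
 * descriptor's completion callback.
 */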
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* fail to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}

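/*
 * DONE interrupt handler: acknowledge the interrupt, then either schedule
 * the completion tasklet (residue is zero) or resubmit the remainder of
 * the current descriptor when a partial transfer is still outstanding.
 */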
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		tasklet_hi_schedule(&chan->done_tasklet);
	} else {
		/* submit the next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock_irqrestore(&chan->lock, flags);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}

/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer of platform_device
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize DONE and ERROR interrupt handlers for the 4 channels. The caller
 * must make sure the pointers passed in are non-NULL. This function should be
 * called only once during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0 - OK to init all IRQ handlers
 * * -EINVAL - Fail to request IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0) {
			dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
			return -EINVAL;
		}

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}

/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer of PDMA engine. Caller should check NULL
 *
 * Initialize all data structures and register bases. The caller must make
 * sure the pointer passed in is non-NULL. This function should be called
 * only once during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		chan->regs.ctrl =
			SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type =
			SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue =
			SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->slave_id = i;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
	}
}

static int sf_pdma_probe(struct platform_device *pdev)
{
	struct sf_pdma *pdma;
	struct sf_pdma_chan *chan;
	struct resource *res;
	int len, chans;
	int ret;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;

	chans = PDMA_NR_CH;
	len = sizeof(*pdma) + sizeof(*chan) * chans;
	pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = chans;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdma->membase))
		return PTR_ERR(pdma->membase);

	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		return ret;

	sf_pdma_setup_chans(pdma);

	pdma->dma_dev.dev = &pdev->dev;

	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;

	/* Setup DMA APIs */
	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");

	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform DMA. (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < PDMA_NR_CH; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	dma_async_device_unregister(&pdma->dma_dev);

	return 0;
}

static const struct of_device_id sf_pdma_dt_ids[] = {
	{ .compatible = "sifive,fu540-c000-pdma" },
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);

static struct platform_driver sf_pdma_driver = {
	.probe		= sf_pdma_probe,
	.remove		= sf_pdma_remove,
	.driver		= {
		.name	= "sf-pdma",
		.of_match_table = of_match_ptr(sf_pdma_dt_ids),
	},
};

static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");