// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/dma/idma64.h>

#include "idma64.h"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2

/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

/* ---------------------------------------------------------------------- */

static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

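	/*
	 * Wait (bounded poll, up to ~100 iterations) until the controller
	 * reports itself disabled.
	 */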
	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}

static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Ensure the controller is turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}

static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];

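	/*
	 * With LLP transfers enabled below, the addresses are reloaded from
	 * the linked list items, so SAR/DAR are simply zeroed here.
	 */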
	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}

static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;

	spin_lock(&idma64c->vchan.lock);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock(&idma64c->vchan.lock);
}

static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

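	/* The RAW status is acknowledged via the CLEAR writes in idma64_chan_irq() */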
	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------- */

static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}

static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}

static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

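	/*
	 * The memory side transfer width is the largest power of two that
	 * divides both the buffer address and its length, capped at 4 bytes
	 * (the "| 4" term below). The device side width comes from the
	 * slave configuration.
	 */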
	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = __ffs(sar | hw->len | 4);
		dst_width = __ffs(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __ffs(config->src_addr_width);
		dst_width = __ffs(dar | hw->len | 4);
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
}

static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	unsigned int i = desc->ndesc;
	struct idma64_hw_desc *hw = &desc->hw[i - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;

	/*
	 * Fill the hardware descriptors from the tail, so that each item can
	 * link to the physical address of its successor (the last one gets
	 * llp = 0).
	 */
	do {
		hw = &desc->hw[--i];
		idma64_hw_desc_fill(hw, config, desc->direction, llp);
		llp = hw->llp;
		desc->length += hw->len;
	} while (i);

	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;

	/* Disable LLP transfer in the last block */
	lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}

static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
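			/* Free only the LLIs allocated so far */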
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}

static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

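	/*
	 * Walk the descriptor list until the hardware's current LLP is
	 * found, subtracting every chunk that has already been transferred
	 * in full.
	 */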
	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}

static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
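	/*
	 * For the currently active descriptor the residue is derived from
	 * the hardware state; a descriptor still sitting in the queue has
	 * not transferred anything yet.
	 */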
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}

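/* Convert a burst size in items to the log2-encoded value used by the MSIZE fields */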
static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}

static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}

static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
	unsigned short count = 100;
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	if (drain)
		cfglo |= IDMA64C_CFGL_CH_DRAIN;
	else
		cfglo &= ~IDMA64C_CFGL_CH_DRAIN;

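	/* Suspend the channel and wait (bounded poll) until its FIFO reports empty */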
	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}

static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}

static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c, false);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c, true);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}

static void idma64_synchronize(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_synchronize(&idma64c->vchan);
}

static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}

static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}

/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;
	idma64->dma.device_synchronize = idma64_synchronize;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->sysdev;

	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}

static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly call devm_free_irq() to avoid the side effects with
	 * the scheduled tasklets.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}

/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct device *sysdev = dev->parent;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;
	chip->sysdev = sysdev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}

static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}

#ifdef CONFIG_PM_SLEEP

static int idma64_pm_suspend(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};

static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name		= LPSS_IDMA64_DRIVER_NAME,
		.pm		= &idma64_dev_pm_ops,
	},
};

module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);