// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

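/*
 * Per-SoC configuration. Only lch_end is exercised in this section (it
 * bounds the register clearing loop in omap_dma_clear_lch()); the other
 * bitfields flag SoC-specific quirks whose handling lives elsewhere in
 * the driver, so read their names as hints rather than definitions.
 */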
struct omap_dma_config {
	int lch_end;
	unsigned int rw_priority:1;
	unsigned int needs_busy_check:1;
	unsigned int may_lose_context:1;
	unsigned int needs_lch_clear:1;
};

struct omap_dma_context {
	u32 irqenable_l0;
	u32 irqenable_l1;
	u32 ocp_sysconfig;
	u32 gcr;
};

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	const struct omap_dma_config *cfg;
	struct notifier_block nb;
	struct omap_dma_context context;
	int lch_count;
	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
	struct mutex lch_lock;	/* for assigning logical channels */
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)

/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr; /* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

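/*
 * Hardware linked-list (type 2) descriptors are chained through
 * next_desc: omap_dma_fill_type2_desc() links each entry to its
 * successor and terminates the final one with the 0xfffffffc end
 * marker. The DESC_NXT_* bits in 'en' tell the controller which side
 * (source or destination) to refresh from the next descriptor and
 * which to reuse.
 */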
struct omap_sg {
	dma_addr_t addr;
	uint32_t en;	/* number of elements (24-bit) */
	uint32_t fn;	/* number of frames (16-bit) */
	int32_t fi;	/* for double indexing */
	int16_t ei;	/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};

struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	bool polled;

	int32_t fi;	/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;	/* for double indexing */
	uint8_t es;	/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;	/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;	/* CICR value */
	uint32_t csdp;	/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[];
};

enum {
	CAPS_0_SUPPORT_LL123 = BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4 = BIT(21),	/* Linked List type4 */

	CCR_FS = BIT(5),
	CCR_READ_PRIORITY = BIT(6),
	CCR_ENABLE = BIT(7),
	CCR_AUTO_INIT = BIT(8),		/* OMAP1 only */
	CCR_REPEAT = BIT(9),		/* OMAP1 only */
	CCR_OMAP31_DISABLE = BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE = BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE = BIT(9),		/* OMAP2+ only */
	CCR_WR_ACTIVE = BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT = 0 << 12,
	CCR_SRC_AMODE_POSTINC = 1 << 12,
	CCR_SRC_AMODE_SGLIDX = 2 << 12,
	CCR_SRC_AMODE_DBLIDX = 3 << 12,
	CCR_DST_AMODE_CONSTANT = 0 << 14,
	CCR_DST_AMODE_POSTINC = 1 << 14,
	CCR_DST_AMODE_SGLIDX = 2 << 14,
	CCR_DST_AMODE_DBLIDX = 3 << 14,
	CCR_CONSTANT_FILL = BIT(16),
	CCR_TRANSPARENT_COPY = BIT(17),
	CCR_BS = BIT(18),
	CCR_SUPERVISOR = BIT(22),
	CCR_PREFETCH = BIT(23),
	CCR_TRIGGER_SRC = BIT(24),
	CCR_BUFFERING_DISABLE = BIT(25),
	CCR_WRITE_PRIORITY = BIT(26),
	CCR_SYNC_ELEMENT = 0,
	CCR_SYNC_FRAME = CCR_FS,
	CCR_SYNC_BLOCK = CCR_BS,
	CCR_SYNC_PACKET = CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8 = 0,
	CSDP_DATA_TYPE_16 = 1,
	CSDP_DATA_TYPE_32 = 2,
	CSDP_SRC_PORT_EMIFF = 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS = 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1 = 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB = 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2 = 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI = 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED = BIT(6),
	CSDP_SRC_BURST_1 = 0 << 7,
	CSDP_SRC_BURST_16 = 1 << 7,
	CSDP_SRC_BURST_32 = 2 << 7,
	CSDP_SRC_BURST_64 = 3 << 7,
	CSDP_DST_PORT_EMIFF = 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS = 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1 = 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB = 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2 = 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI = 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED = BIT(13),
	CSDP_DST_BURST_1 = 0 << 14,
	CSDP_DST_BURST_16 = 1 << 14,
	CSDP_DST_BURST_32 = 2 << 14,
	CSDP_DST_BURST_64 = 3 << 14,
	CSDP_WRITE_NON_POSTED = 0 << 16,
	CSDP_WRITE_POSTED = 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,

	CICR_TOUT_IE = BIT(0),		/* OMAP1 only */
	CICR_DROP_IE = BIT(1),
	CICR_HALF_IE = BIT(2),
	CICR_FRAME_IE = BIT(3),
	CICR_LAST_IE = BIT(4),
	CICR_BLOCK_IE = BIT(5),
	CICR_PKT_IE = BIT(7),		/* OMAP2+ only */
	CICR_TRANS_ERR_IE = BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE = BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE = BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE = BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE = BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK = BIT(15),

	CDP_DST_VALID_INC = 0 << 0,
	CDP_DST_VALID_RELOAD = 1 << 0,
	CDP_DST_VALID_REUSE = 2 << 0,
	CDP_SRC_VALID_INC = 0 << 2,
	CDP_SRC_VALID_RELOAD = 1 << 2,
	CDP_SRC_VALID_REUSE = 2 << 2,
	CDP_NTYPE_TYPE1 = 1 << 4,
	CDP_NTYPE_TYPE2 = 2 << 4,
	CDP_NTYPE_TYPE3 = 3 << 4,
	CDP_TMODE_NORMAL = 0 << 8,
	CDP_TMODE_LLIST = 1 << 8,
	CDP_FAST = BIT(10),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}

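/*
 * Note the symmetry below: for DMA_DEV_TO_MEM the memory side is the
 * destination, so its address is refreshed from each descriptor while
 * the device (source) address is reused; DMA_MEM_TO_DEV is the mirror
 * image.
 */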
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}

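/*
 * Low-level register accessors. Registers are 16-bit, 32-bit, or
 * (OMAP_DMA_REG_2X16BIT) a 32-bit value split across two adjacent
 * 16-bit locations, accessed low half first.
 */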
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

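/*
 * Channel status handling differs by generation: on OMAP1 CSR is
 * clear-on-read, while on OMAP2+ status bits are cleared by writing
 * ones back, which is why omap_dma_get_csr() writes the value it just
 * read.
 */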
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
{
	struct omap_chan *c;
	int i;

	c = od->lch_map[lch];
	if (!c)
		return;

	for (i = CSDP; i <= od->cfg->lch_end; i++)
		omap_dma_chan_write(c, i, 0);
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
			    unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

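/*
 * Kick off (part of) a descriptor: reset the progress counter (CPC on
 * OMAP15xx, CDAC elsewhere), clear stale status, program the
 * linked-list registers when type 2 descriptors are in use, then
 * unmask interrupts and set CCR_ENABLE.
 */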
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}

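/*
 * Bounded poll for the read/write activity bits to clear: roughly 100
 * iterations with 5us delays (on the order of 500us) before reporting
 * a drain failure.
 */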
static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}

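/*
 * Stopping a channel honours erratum i541: a source-triggered channel
 * is only disabled while the module is temporarily forced to no-idle
 * via OCP_SYSCONFIG's MIDLEMODE, which is restored once the channel
 * has been disabled and drained.
 */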
static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;
	return 0;
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}

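/*
 * Completion handling: cyclic transfers get the cyclic callback on
 * every interrupt; otherwise a descriptor completes once its last sg
 * entry (or its entire hardware linked list) has finished, at which
 * point the next queued descriptor is started. Intermediate sg entries
 * are chained here in software.
 */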
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

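/*
 * Interrupt handling uses the L1 interrupt line: pending channels are
 * taken lowest-first via ffs() and acknowledged individually in
 * IRQSTATUS_L1.
 */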
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

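/*
 * Logical channel allocation is a first-fit search of lch_bitmap under
 * the lch_lock mutex; -EBUSY (with *lch set to -EINVAL) signals that
 * every channel is taken.
 */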
static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
{
	int channel;

	mutex_lock(&od->lch_lock);
	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
	if (channel >= od->lch_count)
		goto out_busy;
	set_bit(channel, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);

	omap_dma_clear_lch(od, channel);
	*lch = channel;

	return 0;

out_busy:
	mutex_unlock(&od->lch_lock);
	*lch = -EINVAL;

	return -EBUSY;
}

static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
{
	omap_dma_clear_lch(od, lch);
	mutex_lock(&od->lch_lock);
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
}

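/*
 * Channel setup: legacy platforms allocate through omap_request_dma(),
 * everything else through the bitmap allocator above. In the
 * non-legacy case the channel's interrupt is enabled on the L1 line
 * (serviced by omap_dma_irq()) and masked on L0.
 */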
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_dma_get_lch(od, &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);

	if (od->legacy)
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;
	struct omap_desc *d = NULL;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc && c->desc->vd.tx.cookie == cookie)
		d = c->desc;

	if (!txstate)
		goto out;

	if (d) {
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

		if (vd)
			txstate->residue = omap_dma_desc_size(
				to_omap_dma_desc(&vd->tx));
		else
			txstate->residue = 0;
	}

out:
	if (ret == DMA_IN_PROGRESS && c->paused) {
		ret = DMA_PAUSED;
	} else if (d && d->polled && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly and mark it as completed
		 */
		if (!(ccr & CCR_ENABLE)) {
			ret = DMA_COMPLETE;
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

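/*
 * Slave sg transfers are frame synchronised: the burst size becomes
 * the element count (EN) of one frame and each sg entry contributes
 * its length in frames (FN), so bytes per entry = ES * EN * FN. With
 * two or more entries a hardware linked list is used when supported,
 * and if any descriptor-pool allocation fails the transfer falls back
 * to software-chained sg entries.
 */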
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configuring
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;

		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	if (sglen >= 2)
		d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

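/*
 * Cyclic transfers use a single sg entry: one period maps onto one
 * frame (EN = period_len / element size) and the buffer onto FN
 * frames, with a frame interrupt per period when DMA_PREP_INTERRUPT
 * is set.
 */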
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

omap_dma_prep_dma_memcpy(struct dma_chan * chan,dma_addr_t dest,dma_addr_t src,size_t len,unsigned long tx_flags)1237*4882a593Smuzhiyun static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
1238*4882a593Smuzhiyun struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1239*4882a593Smuzhiyun size_t len, unsigned long tx_flags)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun struct omap_chan *c = to_omap_dma_chan(chan);
1242*4882a593Smuzhiyun struct omap_desc *d;
1243*4882a593Smuzhiyun uint8_t data_type;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
1246*4882a593Smuzhiyun if (!d)
1247*4882a593Smuzhiyun return NULL;
1248*4882a593Smuzhiyun
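	/*
	 * Pick the widest element size (8/16/32 bit) that both addresses
	 * and the length are aligned to: __ffs() yields the lowest set
	 * bit, i.e. the common alignment, capped at 32 bits.
	 */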
	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

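	/*
	 * Without DMA_PREP_INTERRUPT the frame interrupt stays disabled
	 * and completion is detected by polling in tx_status instead.
	 */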
	if (tx_flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;
	else
		d->polled = true;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;
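	/*
	 * Any inter-chunk gap is realised with double-indexed addressing:
	 * an element index of 1 keeps elements contiguous within a chunk,
	 * while a frame index of gap + 1 (sDMA indexes are biased by one)
	 * skips the gap between consecutive chunks.
	 */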
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

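	/*
	 * sDMA moves at most 32-bit elements, and the burst length is
	 * bounded by the 24-bit element counter advertised in max_burst.
	 */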
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit).
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the DMA twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;
	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to a UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
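		/* Complete any pending descriptor writes before restarting */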
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

/* Currently used by omap2 & 3 to block deeper SoC idle states */
static bool omap_dma_busy(struct omap_dmadev *od)
{
	struct omap_chan *c;
	int lch = -1;

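	/*
	 * Walk the allocated logical channels and report busy as soon as
	 * one still has CCR_ENABLE set.
	 */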
	while (1) {
		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
		if (lch >= od->lch_count)
			break;
		c = od->lch_map[lch];
		if (!c)
			continue;
		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
			return true;
	}

	return false;
}

/* Currently only used for omap2. For omap1, a check for lcd_dma is also needed */
static int omap_dma_busy_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		break;
	}

	return NOTIFY_OK;
}

/*
 * We are using IRQENABLE_L1, and the legacy DMA code was using IRQENABLE_L0.
 * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
 * now. Context save currently seems to be needed only on omap3.
 */
static void omap_dma_context_save(struct omap_dmadev *od)
{
	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
	od->context.gcr = omap_dma_glbl_read(od, GCR);
}

static void omap_dma_context_restore(struct omap_dmadev *od)
{
	int i;

	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);

	/* Clear IRQSTATUS_L0 as the legacy DMA code is no longer doing it */
	if (od->plat->errata & DMA_ROMCODE_BUG)
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);

	/* Clear the DMA channels */
	for (i = 0; i < od->lch_count; i++)
		omap_dma_clear_lch(od, i);
}

/* Currently only used for omap3 */
static int omap_dma_context_notifier(struct notifier_block *nb,
				     unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		omap_dma_context_save(od);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_dma_context_restore(od);
		break;
	}

	return NOTIFY_OK;
}

static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
			      int max_fifo_depth, int tparams)
{
	u32 val;

	/* Set only for omap2430 and later */
	if (!od->cfg->rw_priority)
		return;

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

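	/*
	 * GCR layout: max channel FIFO depth in bits 7:0, thread
	 * parameters in bits 13:12, arbitration rate in bits 23:16.
	 */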
	val = 0xff & max_fifo_depth;
	val |= (0x3 & tparams) << 12;
	val |= (arb_rate & 0xff) << 16;

	omap_dma_glbl_write(od, GCR, val);
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * No flags currently set for default configuration as omap1 is still
 * using platform data.
 */
static const struct omap_dma_config default_cfg;

static int omap_dma_probe(struct platform_device *pdev)
{
	const struct omap_dma_config *conf;
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 val;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	conf = of_device_get_match_data(&pdev->dev);
	if (conf) {
		od->cfg = conf;
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
			return -ENODEV;
		}
	} else {
		od->cfg = &default_cfg;

		od->plat = omap_get_plat_info();
		if (!od->plat)
			return -EPROBE_DEFER;
	}

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	mutex_init(&od->lch_lock);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		od->lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!od->lch_count))
			od->lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		od->lch_count = OMAP_SDMA_CHANNELS;
	}

	/* Mask of allowed logical channels */
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
		/* Tag channels not in mask as reserved */
		val = ~val;
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	}
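	/* The first two channels stay reserved when HS_CHANNELS_RESERVED is set */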
	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
		bitmap_set(od->lch_bitmap, 0, 2);

	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

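	/*
	 * Without a dedicated L1 interrupt, fall back to legacy mode in
	 * which interrupt handling remains with the platform DMA code.
	 */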
	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

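	/*
	 * Linked-list (type 2 descriptor) transfers need coherent memory,
	 * so the descriptors are served from a dma_pool, 4-byte aligned.
	 */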
	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);

	if (od->cfg->needs_busy_check) {
		od->nb.notifier_call = omap_dma_busy_notifier;
		cpu_pm_register_notifier(&od->nb);
	} else if (od->cfg->may_lose_context) {
		od->nb.notifier_call = omap_dma_context_notifier;
		cpu_pm_register_notifier(&od->nb);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	/* Matches the notifier registration done for either case in probe */
	if (od->cfg->needs_busy_check || od->cfg->may_lose_context)
		cpu_pm_unregister_notifier(&od->nb);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}

static const struct omap_dma_config omap2420_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.needs_busy_check = true,
};

static const struct omap_dma_config omap2430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct omap_dma_config omap3430_data = {
	.lch_end = CCFN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap3630_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap4_data = {
	.lch_end = CCDN,
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = omap_dma_match,
	},
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
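	/*
	 * Accept only channels belonging to this driver, recording the
	 * requested sDMA request line in the channel on a match.
	 */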
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");