/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 */
9*4882a593Smuzhiyun #ifndef __DMA_FSLDMA_H
10*4882a593Smuzhiyun #define __DMA_FSLDMA_H
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <linux/device.h>
13*4882a593Smuzhiyun #include <linux/dmapool.h>
14*4882a593Smuzhiyun #include <linux/dmaengine.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun /* Define data structures needed by Freescale
17*4882a593Smuzhiyun * MPC8540 and MPC8349 DMA controller.
18*4882a593Smuzhiyun */
19*4882a593Smuzhiyun #define FSL_DMA_MR_CS 0x00000001
20*4882a593Smuzhiyun #define FSL_DMA_MR_CC 0x00000002
21*4882a593Smuzhiyun #define FSL_DMA_MR_CA 0x00000008
22*4882a593Smuzhiyun #define FSL_DMA_MR_EIE 0x00000040
23*4882a593Smuzhiyun #define FSL_DMA_MR_XFE 0x00000020
24*4882a593Smuzhiyun #define FSL_DMA_MR_EOLNIE 0x00000100
25*4882a593Smuzhiyun #define FSL_DMA_MR_EOLSIE 0x00000080
26*4882a593Smuzhiyun #define FSL_DMA_MR_EOSIE 0x00000200
27*4882a593Smuzhiyun #define FSL_DMA_MR_CDSM 0x00000010
28*4882a593Smuzhiyun #define FSL_DMA_MR_CTM 0x00000004
29*4882a593Smuzhiyun #define FSL_DMA_MR_EMP_EN 0x00200000
30*4882a593Smuzhiyun #define FSL_DMA_MR_EMS_EN 0x00040000
31*4882a593Smuzhiyun #define FSL_DMA_MR_DAHE 0x00002000
32*4882a593Smuzhiyun #define FSL_DMA_MR_SAHE 0x00001000
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #define FSL_DMA_MR_SAHTS_MASK 0x0000C000
35*4882a593Smuzhiyun #define FSL_DMA_MR_DAHTS_MASK 0x00030000
36*4882a593Smuzhiyun #define FSL_DMA_MR_BWC_MASK 0x0f000000
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun * Bandwidth/pause control determines how many bytes a given
40*4882a593Smuzhiyun * channel is allowed to transfer before the DMA engine pauses
41*4882a593Smuzhiyun * the current channel and switches to the next channel
42*4882a593Smuzhiyun */
43*4882a593Smuzhiyun #define FSL_DMA_MR_BWC 0x0A000000
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /* Special MR definition for MPC8349 */
46*4882a593Smuzhiyun #define FSL_DMA_MR_EOTIE 0x00000080
47*4882a593Smuzhiyun #define FSL_DMA_MR_PRC_RM 0x00000800
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #define FSL_DMA_SR_CH 0x00000020
50*4882a593Smuzhiyun #define FSL_DMA_SR_PE 0x00000010
51*4882a593Smuzhiyun #define FSL_DMA_SR_CB 0x00000004
52*4882a593Smuzhiyun #define FSL_DMA_SR_TE 0x00000080
53*4882a593Smuzhiyun #define FSL_DMA_SR_EOSI 0x00000002
54*4882a593Smuzhiyun #define FSL_DMA_SR_EOLSI 0x00000001
55*4882a593Smuzhiyun #define FSL_DMA_SR_EOCDI 0x00000001
56*4882a593Smuzhiyun #define FSL_DMA_SR_EOLNI 0x00000008
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #define FSL_DMA_SATR_SBPATMU 0x20000000
59*4882a593Smuzhiyun #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000
60*4882a593Smuzhiyun #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000
61*4882a593Smuzhiyun #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000
62*4882a593Smuzhiyun #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000
63*4882a593Smuzhiyun #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun #define FSL_DMA_DATR_DBPATMU 0x20000000
66*4882a593Smuzhiyun #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000
67*4882a593Smuzhiyun #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000
68*4882a593Smuzhiyun #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun #define FSL_DMA_EOL ((u64)0x1)
71*4882a593Smuzhiyun #define FSL_DMA_SNEN ((u64)0x10)
72*4882a593Smuzhiyun #define FSL_DMA_EOSIE 0x8
73*4882a593Smuzhiyun #define FSL_DMA_NLDA_MASK (~(u64)0x1f)
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun #define FSL_DMA_DGSR_TE 0x80
78*4882a593Smuzhiyun #define FSL_DMA_DGSR_CH 0x20
79*4882a593Smuzhiyun #define FSL_DMA_DGSR_PE 0x10
80*4882a593Smuzhiyun #define FSL_DMA_DGSR_EOLNI 0x08
81*4882a593Smuzhiyun #define FSL_DMA_DGSR_CB 0x04
82*4882a593Smuzhiyun #define FSL_DMA_DGSR_EOSI 0x02
83*4882a593Smuzhiyun #define FSL_DMA_DGSR_EOLSI 0x01
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun #define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
86*4882a593Smuzhiyun BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
87*4882a593Smuzhiyun BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
88*4882a593Smuzhiyun BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
89*4882a593Smuzhiyun typedef u64 __bitwise v64;
90*4882a593Smuzhiyun typedef u32 __bitwise v32;
91*4882a593Smuzhiyun
/*
 * In-memory hardware link descriptor consumed by the DMA engine.
 * Fields are stored in the controller's endianness (v32/v64) and are
 * converted with CPU_TO_DMA()/DMA_TO_CPU().  The low 5 bits of
 * next_ln_addr carry flags rather than address (FSL_DMA_EOL,
 * FSL_DMA_SNEN; cf. FSL_DMA_NLDA_MASK), hence the 32-byte alignment.
 */
struct fsl_dma_ld_hw {
	v64 src_addr;		/* transfer source address */
	v64 dst_addr;		/* transfer destination address */
	v64 next_ln_addr;	/* next link descriptor address + flag bits */
	v32 count;		/* transfer byte count (cf. FSL_DMA_BCR_MAX_CNT) */
	v32 reserve;		/* pad to 32 bytes */
} __attribute__((aligned(32)));
99*4882a593Smuzhiyun
/*
 * Software descriptor: one hardware link descriptor plus the driver's
 * bookkeeping.  @hw is the first member, so the software descriptor's
 * address is also the (32-byte aligned) hardware descriptor's address.
 */
struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;	/* hardware view; must stay first */
	struct list_head node;		/* linkage on a channel ld_* list (to_fsl_desc) */
	struct list_head tx_list;	/* descriptors chained into one transaction */
	struct dma_async_tx_descriptor async_tx;	/* dmaengine cookie/ACK state */
} __attribute__((aligned(32)));
106*4882a593Smuzhiyun
/*
 * Memory-mapped per-channel register block; the 0xNN comments give each
 * register's byte offset from the channel base.
 *
 * NOTE(review): natural alignment of a u64 would place @ndar at 0x28,
 * not the commented 0x24 — confirm the accessors in the driver match
 * the hardware offset on the supported ABIs.
 */
struct fsldma_chan_regs {
	u32 mr;		/* 0x00 - Mode Register */
	u32 sr;		/* 0x04 - Status Register */
	u64 cdar;	/* 0x08 - Current descriptor address register */
	u64 sar;	/* 0x10 - Source Address Register */
	u64 dar;	/* 0x18 - Destination Address Register */
	u32 bcr;	/* 0x20 - Byte Count Register */
	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
};
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun struct fsldma_chan;
118*4882a593Smuzhiyun #define FSL_DMA_MAX_CHANS_PER_DEVICE 8
119*4882a593Smuzhiyun
/*
 * Driver state for one DMA controller instance, holding up to
 * FSL_DMA_MAX_CHANS_PER_DEVICE channels.
 */
struct fsldma_device {
	void __iomem *regs;	/* DGSR register base */
	struct device *dev;	/* owning device */
	struct dma_device common;	/* dmaengine device registration */
	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];	/* per-channel state, indexed by id */
	u32 feature;		/* The same as DMA channels */
	int irq;		/* Channel IRQ */
};
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun /* Define macros for fsldma_chan->feature property */
130*4882a593Smuzhiyun #define FSL_DMA_LITTLE_ENDIAN 0x00000000
131*4882a593Smuzhiyun #define FSL_DMA_BIG_ENDIAN 0x00000001
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun #define FSL_DMA_IP_MASK 0x00000ff0
134*4882a593Smuzhiyun #define FSL_DMA_IP_85XX 0x00000010
135*4882a593Smuzhiyun #define FSL_DMA_IP_83XX 0x00000020
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
138*4882a593Smuzhiyun #define FSL_DMA_CHAN_START_EXT 0x00002000
139*4882a593Smuzhiyun
#ifdef CONFIG_PM
/*
 * Register state preserved across suspend/resume.  Only the mode
 * register (MR) is captured here.
 */
struct fsldma_chan_regs_save {
	u32 mr;
};

/* Power-management state of a channel */
enum fsldma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
#endif
150*4882a593Smuzhiyun
/*
 * Per-channel driver state; one instance per hardware DMA channel.
 * The three ld_* lists track a descriptor's lifecycle and are guarded
 * by @desc_lock.
 */
struct fsldma_chan {
	char name[8];			/* Channel name */
	struct fsldma_chan_regs __iomem *regs;	/* mapped channel register block */
	spinlock_t desc_lock;		/* Descriptor operation lock */
	/*
	 * Descriptors which are queued to run, but have not yet been
	 * submitted to the hardware for execution
	 */
	struct list_head ld_pending;
	/*
	 * Descriptors which are currently being executed by the hardware
	 */
	struct list_head ld_running;
	/*
	 * Descriptors which have finished execution by the hardware. These
	 * descriptors have already had their cleanup actions run. They are
	 * waiting for the ACK bit to be set by the async_tx API.
	 */
	struct list_head ld_completed; /* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;	/* deferred-work tasklet */
	u32 feature;			/* FSL_DMA_* feature/endianness flags */
	bool idle;			/* DMA controller is idle */
#ifdef CONFIG_PM
	struct fsldma_chan_regs_save regs_save;	/* register state across suspend */
	enum fsldma_pm_state pm_state;
#endif

	/*
	 * Hardware-specific hooks; NOTE(review): presumably installed at
	 * probe time and may be NULL on some controllers — confirm in the
	 * driver before calling unconditionally.
	 */
	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun #define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
191*4882a593Smuzhiyun #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
192*4882a593Smuzhiyun #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun #ifdef CONFIG_PPC
195*4882a593Smuzhiyun #define fsl_ioread32(p) in_le32(p)
196*4882a593Smuzhiyun #define fsl_ioread32be(p) in_be32(p)
197*4882a593Smuzhiyun #define fsl_iowrite32(v, p) out_le32(p, v)
198*4882a593Smuzhiyun #define fsl_iowrite32be(v, p) out_be32(p, v)
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun #ifdef __powerpc64__
201*4882a593Smuzhiyun #define fsl_ioread64(p) in_le64(p)
202*4882a593Smuzhiyun #define fsl_ioread64be(p) in_be64(p)
203*4882a593Smuzhiyun #define fsl_iowrite64(v, p) out_le64(p, v)
204*4882a593Smuzhiyun #define fsl_iowrite64be(v, p) out_be64(p, v)
205*4882a593Smuzhiyun #else
/*
 * 32-bit PowerPC has no native 64-bit MMIO accessor, so compose the
 * value from two little-endian 32-bit reads (low word at the lower
 * address).
 *
 * Marked "static inline": a plain static definition in a header emits
 * -Wunused-function in every translation unit that includes the header
 * without using it.
 *
 * NOTE(review): the two halves are not read atomically; the register
 * must be stable across the pair of reads.
 */
static inline u64 fsl_ioread64(const u64 __iomem *addr)
{
	u32 val_lo = in_le32((u32 __iomem *)addr);
	u32 val_hi = in_le32((u32 __iomem *)addr + 1);

	return ((u64)val_hi << 32) + val_lo;
}
213*4882a593Smuzhiyun
/*
 * 64-bit little-endian MMIO write for 32-bit PowerPC: the high word
 * (upper address) is written first, then the low word.
 *
 * "static inline" avoids -Wunused-function in translation units that
 * include this header but never call the helper.
 */
static inline void fsl_iowrite64(u64 val, u64 __iomem *addr)
{
	out_le32((u32 __iomem *)addr + 1, val >> 32);
	out_le32((u32 __iomem *)addr, (u32)val);
}
219*4882a593Smuzhiyun
/*
 * 64-bit big-endian MMIO read for 32-bit PowerPC: the high word lives
 * at the lower address.
 *
 * "static inline" avoids -Wunused-function in translation units that
 * include this header but never call the helper.
 *
 * NOTE(review): the two halves are not read atomically; the register
 * must be stable across the pair of reads.
 */
static inline u64 fsl_ioread64be(const u64 __iomem *addr)
{
	u32 val_hi = in_be32((u32 __iomem *)addr);
	u32 val_lo = in_be32((u32 __iomem *)addr + 1);

	return ((u64)val_hi << 32) + val_lo;
}
227*4882a593Smuzhiyun
/*
 * 64-bit big-endian MMIO write for 32-bit PowerPC: high word goes to
 * the lower address, then the low word to the upper address.
 *
 * "static inline" avoids -Wunused-function in translation units that
 * include this header but never call the helper.
 */
static inline void fsl_iowrite64be(u64 val, u64 __iomem *addr)
{
	out_be32((u32 __iomem *)addr, val >> 32);
	out_be32((u32 __iomem *)addr + 1, (u32)val);
}
233*4882a593Smuzhiyun #endif
234*4882a593Smuzhiyun #endif
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun #if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
237*4882a593Smuzhiyun #define fsl_ioread32(p) ioread32(p)
238*4882a593Smuzhiyun #define fsl_ioread32be(p) ioread32be(p)
239*4882a593Smuzhiyun #define fsl_iowrite32(v, p) iowrite32(v, p)
240*4882a593Smuzhiyun #define fsl_iowrite32be(v, p) iowrite32be(v, p)
241*4882a593Smuzhiyun #define fsl_ioread64(p) ioread64(p)
242*4882a593Smuzhiyun #define fsl_ioread64be(p) ioread64be(p)
243*4882a593Smuzhiyun #define fsl_iowrite64(v, p) iowrite64(v, p)
244*4882a593Smuzhiyun #define fsl_iowrite64be(v, p) iowrite64be(v, p)
245*4882a593Smuzhiyun #endif
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun #define FSL_DMA_IN(fsl_dma, addr, width) \
248*4882a593Smuzhiyun (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
249*4882a593Smuzhiyun fsl_ioread##width##be(addr) : fsl_ioread##width(addr))
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun #define FSL_DMA_OUT(fsl_dma, addr, val, width) \
252*4882a593Smuzhiyun (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \
253*4882a593Smuzhiyun fsl_iowrite##width##be(val, addr) : fsl_iowrite \
254*4882a593Smuzhiyun ##width(val, addr))
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun #define DMA_TO_CPU(fsl_chan, d, width) \
257*4882a593Smuzhiyun (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
258*4882a593Smuzhiyun be##width##_to_cpu((__force __be##width)(v##width)d) : \
259*4882a593Smuzhiyun le##width##_to_cpu((__force __le##width)(v##width)d))
260*4882a593Smuzhiyun #define CPU_TO_DMA(fsl_chan, c, width) \
261*4882a593Smuzhiyun (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
262*4882a593Smuzhiyun (__force v##width)cpu_to_be##width(c) : \
263*4882a593Smuzhiyun (__force v##width)cpu_to_le##width(c))
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun #endif /* __DMA_FSLDMA_H */
266