xref: /OK3568_Linux_fs/u-boot/drivers/dma/ti-edma3.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Enhanced Direct Memory Access (EDMA3) Controller
 *
 * (C) Copyright 2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <asm/io.h>
#include <common.h>
#include <dm.h>
#include <dma.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>

#define EDMA3_SL_BASE(slot)			(0x4000 + ((slot) << 5))
#define EDMA3_SL_MAX_NUM			512
#define EDMA3_SLOPT_FIFO_WIDTH_MASK		(0x7 << 8)

#define EDMA3_QCHMAP(ch)			(0x0200 + ((ch) << 2))
#define EDMA3_CHMAP_PARSET_MASK			0x1ff
#define EDMA3_CHMAP_PARSET_SHIFT		0x5
#define EDMA3_CHMAP_TRIGWORD_SHIFT		0x2

#define EDMA3_QEMCR				0x314
#define EDMA3_IPR				0x1068
#define EDMA3_IPRH				0x106c
#define EDMA3_ICR				0x1070
#define EDMA3_ICRH				0x1074
#define EDMA3_QEECR				0x1088
#define EDMA3_QEESR				0x108c
#define EDMA3_QSECR				0x1094

struct ti_edma3_priv {
	u32 base;
};

/**
 * qedma3_start - start qdma on a channel
 * @base: base address of edma
 * @cfg: pointer to struct edma3_channel_config where you set the PaRAM
 * slot to associate with, the chnum (the QDMA channel number, 0-7), the
 * transfer complete code, and the trigger slot word, which must correspond
 * to the word number in struct edma3_slot_layout whose write generates
 * the event.
 */
void qedma3_start(u32 base, struct edma3_channel_config *cfg)
{
	u32 qchmap;

	/* Clear the pending int bit */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Map parameter set and trigger word to the quick channel */
	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
		  << EDMA3_CHMAP_PARSET_SHIFT) |
		  (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);

	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Enable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
}
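
/*
 * Usage sketch (illustrative only; the slot and channel numbers are
 * arbitrary and mirror what __edma3_transfer() below does):
 *
 *	struct edma3_channel_config chan = {
 *		.slot = 1,
 *		.chnum = 0,
 *		.complete_code = 0,
 *		.trigger_slot_word = EDMA3_TWORD(dst),
 *	};
 *
 *	qedma3_start(base, &chan);
 *	edma3_set_dest_addr(base, chan.slot, (u32)dst);
 *	while (edma3_check_for_transfer(base, &chan))
 *		;
 *	qedma3_stop(base, &chan);
 */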

/**
 * edma3_set_dest - set initial DMA destination address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @dst: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma3_set_dest_index().
 */
void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
		    enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(dst, &rg->dst);
}
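
/*
 * For a peripheral FIFO destination the call would look roughly like this
 * (illustrative; the address and width depend on the peripheral):
 *
 *	edma3_set_dest(base, slot, fifo_phys_addr, FIFO, W32BIT);
 *
 * For ordinary memory destinations, mode is INCR and the width is ignored.
 */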

/**
 * edma3_set_dest_index - configure DMA destination address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between destination arrays in a frame
 * @cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
		     &rg->src_dst_cidx);
}
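
/*
 * Indexing example (illustrative): for a transfer of bcnt contiguous
 * arrays of acnt bytes each, the destination advances by one array per
 * synchronization event, so bidx is simply acnt and cidx stays 0; this
 * is exactly how __edma3_transfer() below programs its slot.  Writing a
 * peripheral register repeatedly would instead use bidx = cidx = 0.
 */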

/**
 * edma3_set_dest_addr - set destination address for slot only
 */
void edma3_set_dest_addr(u32 base, int slot, u32 dst)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_src - set initial DMA source address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @src: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma3_set_src_index().
 */
void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
		   enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_src_index - configure DMA source address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between source arrays in a frame
 * @cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0xffff0000) | bidx,
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0xffff0000) | cidx,
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_src_addr - set source address for slot only
 */
void edma3_set_src_addr(u32 base, int slot, u32 src)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_transfer_params - configure DMA transfer parameters
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma3_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma3_set_transfer_params(u32 base, int slot, int acnt,
			       int bcnt, int ccnt, u16 bcnt_rld,
			       enum edma3_sync_dimension sync_mode)
{
	u32 opt;
	u32 link_bcntrld;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	link_bcntrld = __raw_readl(&rg->link_bcntrld);

	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
		     &rg->link_bcntrld);

	opt = __raw_readl(&rg->opt);
	if (sync_mode == ASYNC)
		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
	else
		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);

	/* Set the acount, bcount, ccount registers */
	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(0xffff & ccnt, &rg->ccnt);
}
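
/*
 * Putting the pieces together, an AB-synchronized copy of bcnt chunks of
 * acnt bytes each could be described roughly like this (illustrative
 * sketch only; slot, addresses and counts are placeholders):
 *
 *	edma3_set_src(base, slot, src_phys, INCR, W8BIT);
 *	edma3_set_dest(base, slot, dst_phys, INCR, W8BIT);
 *	edma3_set_src_index(base, slot, acnt, 0);
 *	edma3_set_dest_index(base, slot, acnt, 0);
 *	edma3_set_transfer_params(base, slot, acnt, bcnt, 1, 0, ABSYNC);
 *
 * __edma3_transfer() below builds the same kind of slot in one shot with
 * edma3_slot_configure() instead.
 */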

/**
 * edma3_write_slot - write parameter RAM data for slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		__raw_writel(*p++, addr++);
}

/**
 * edma3_read_slot - read parameter RAM data from slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		*p++ = __raw_readl(addr++);
}
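
/*
 * The pair above enables a simple save/replay pattern (illustrative;
 * slot numbers are arbitrary):
 *
 *	struct edma3_slot_layout tmpl;
 *
 *	edma3_read_slot(base, 1, &tmpl);
 *	edma3_write_slot(base, 2, &tmpl);
 */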

/**
 * edma3_slot_configure - set up a parameter RAM slot from a config struct
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @cfg: pointer to struct edma3_slot_config holding the slot fields
 */
void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	__raw_writel(cfg->opt, &rg->opt);
	__raw_writel(cfg->src, &rg->src);
	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(cfg->dst, &rg->dst);
	__raw_writel((cfg->dst_bidx << 16) |
		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
	__raw_writel((cfg->bcntrld << 16) |
		     (cfg->link & 0xffff), &rg->link_bcntrld);
	__raw_writel((cfg->dst_cidx << 16) |
		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
}

/**
 * edma3_check_for_transfer - check if a transfer has completed by testing
 * its interrupt pending bit, and clear that bit if it is set
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the qdma channel was started
 *
 * Return 0 if complete, 1 if not.
 */
int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
{
	u32 inum;
	u32 ipr_base;
	u32 icr_base;

	if (cfg->complete_code < 32) {
		ipr_base = base + EDMA3_IPR;
		icr_base = base + EDMA3_ICR;
		inum = 1 << cfg->complete_code;
	} else {
		ipr_base = base + EDMA3_IPRH;
		icr_base = base + EDMA3_ICRH;
		inum = 1 << (cfg->complete_code - 32);
	}

	/* check complete interrupt */
	if (!(__raw_readl(ipr_base) & inum))
		return 1;

	/* clean up the pending int bit */
	__raw_writel(inum, icr_base);

	return 0;
}

/**
 * qedma3_stop - stop qdma on the channel passed
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the qdma channel was started
 */
void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
{
	/* Disable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);

	/* clean up the interrupt indication */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Clear the channel map */
	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
}

void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		      void *dst, void *src, size_t len)
{
	struct edma3_slot_config        slot;
	struct edma3_channel_config     edma_channel;
	int                             b_cnt_value = 1;
	int                             rem_bytes  = 0;
	int                             a_cnt_value = len;
	unsigned int                    addr = (unsigned int) (dst);
	unsigned int                    max_acnt  = 0x7FFFU;

	if (len > max_acnt) {
		b_cnt_value = (len / max_acnt);
		rem_bytes  = (len % max_acnt);
		a_cnt_value = max_acnt;
	}
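
	/*
	 * Worked example of the split above (illustrative): for
	 * len = 100000 and max_acnt = 0x7FFF (32767), the first pass
	 * copies b_cnt_value = 3 arrays of a_cnt_value = 32767 bytes,
	 * and the second pass below mops up rem_bytes = 1699 bytes.
	 */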

	slot.opt        = 0;
	slot.src        = ((unsigned int) src);
	slot.acnt       = a_cnt_value;
	slot.bcnt       = b_cnt_value;
	slot.ccnt       = 1;
	slot.src_bidx   = a_cnt_value;
	slot.dst_bidx   = a_cnt_value;
	slot.src_cidx   = 0;
	slot.dst_cidx   = 0;
	slot.link       = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld    = 0;
	slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
			  EDMA3_SLOPT_COMP_CODE(0) |
			  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;

	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
	edma_channel.slot = edma_slot_num;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* set event trigger to dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

	qedma3_start(edma3_base_addr, &edma_channel);
	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);

	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
		;
	qedma3_stop(edma3_base_addr, &edma_channel);

	if (rem_bytes != 0) {
		slot.opt        = 0;
		slot.src        =
			(b_cnt_value * max_acnt) + ((unsigned int) src);
		slot.acnt       = rem_bytes;
		slot.bcnt       = 1;
		slot.ccnt       = 1;
		slot.src_bidx   = rem_bytes;
		slot.dst_bidx   = rem_bytes;
		slot.src_cidx   = 0;
		slot.dst_cidx   = 0;
		slot.link       = EDMA3_PARSET_NULL_LINK;
		slot.bcntrld    = 0;
		slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
				  EDMA3_SLOPT_COMP_CODE(0) |
				  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
		edma_channel.slot = edma_slot_num;
		edma_channel.chnum = 0;
		edma_channel.complete_code = 0;
		/* set event trigger to dst update */
		edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

		qedma3_start(edma3_base_addr, &edma_channel);
		edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr +
				    (max_acnt * b_cnt_value));
		while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
			;
		qedma3_stop(edma3_base_addr, &edma_channel);
	}
}

#ifndef CONFIG_DMA

void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		    void *dst, void *src, size_t len)
{
	__edma3_transfer(edma3_base_addr, edma_slot_num, dst, src, len);
}

#else

static int ti_edma3_transfer(struct udevice *dev, int direction, void *dst,
			     void *src, size_t len)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	/* enable edma3 clocks */
	enable_edma3_clocks();

	switch (direction) {
	case DMA_MEM_TO_MEM:
		__edma3_transfer(priv->base, 1, dst, src, len);
		break;
	default:
		pr_err("Transfer type not implemented in DMA driver\n");
		break;
	}

	/* disable edma3 clocks */
	disable_edma3_clocks();

	return 0;
}
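
/*
 * With CONFIG_DMA enabled, callers are expected to reach this hook through
 * the generic DMA uclass rather than calling edma3_transfer() directly.
 * A minimal sketch, assuming the dma_get_device()/dma_transfer() helpers
 * from the DMA uclass API (error handling omitted):
 *
 *	struct udevice *dma_dev;
 *
 *	if (!dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dma_dev))
 *		dma_transfer(dma_dev, DMA_MEM_TO_MEM, dst, src, len);
 */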

static int ti_edma3_ofdata_to_platdata(struct udevice *dev)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	priv->base = devfdt_get_addr(dev);

	return 0;
}

static int ti_edma3_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;

	return 0;
}

static const struct dma_ops ti_edma3_ops = {
	.transfer	= ti_edma3_transfer,
};

static const struct udevice_id ti_edma3_ids[] = {
	{ .compatible = "ti,edma3" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti_edma3",
	.id	= UCLASS_DMA,
	.of_match = ti_edma3_ids,
	.ops	= &ti_edma3_ops,
	.ofdata_to_platdata = ti_edma3_ofdata_to_platdata,
	.probe	= ti_edma3_probe,
	.priv_auto_alloc_size = sizeof(struct ti_edma3_priv),
};
#endif /* CONFIG_DMA */