xref: /OK3568_Linux_fs/kernel/drivers/dma/dw-edma/dw-edma-v0-core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
4*4882a593Smuzhiyun  * Synopsys DesignWare eDMA v0 core
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/bitfield.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "dw-edma-core.h"
12*4882a593Smuzhiyun #include "dw-edma-v0-core.h"
13*4882a593Smuzhiyun #include "dw-edma-v0-regs.h"
14*4882a593Smuzhiyun #include "dw-edma-v0-debugfs.h"
15*4882a593Smuzhiyun 
/*
 * Bits of the channel-control word placed in each linked-list element
 * (see dw_edma_v0_core_write_chunk()) and in ch_control1.
 * NOTE(review): acronym expansions below follow the register naming in
 * dw-edma-v0-regs.h — confirm against the Synopsys eDMA databook.
 */
enum dw_edma_control {
	DW_EDMA_V0_CB					= BIT(0),	/* cycle bit */
	DW_EDMA_V0_TCB					= BIT(1),	/* toggle cycle bit */
	DW_EDMA_V0_LLP					= BIT(2),	/* last link element */
	DW_EDMA_V0_LIE					= BIT(3),	/* local interrupt enable */
	DW_EDMA_V0_RIE					= BIT(4),	/* remote interrupt enable */
	DW_EDMA_V0_CCS					= BIT(8),	/* consumer cycle state */
	DW_EDMA_V0_LLE					= BIT(9),	/* linked list enable */
};
25*4882a593Smuzhiyun 
__dw_regs(struct dw_edma * dw)26*4882a593Smuzhiyun static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	return dw->rg_region.vaddr;
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun 
/* Plain 32-bit accessors on the common (non-channel) register file. */
#define SET(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET(dw, name)					\
	readl(&(__dw_regs(dw)->name))

/*
 * Direction-qualified accessors: most control registers exist in a
 * wr_* and an rd_* instance; pick one based on the transfer direction.
 */
#define SET_RW(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET(dw, wr_##name, value);	\
		else					\
			SET(dw, rd_##name, value);	\
	} while (0)

#define GET_RW(dw, dir, name)				\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET(dw, wr_##name)				\
	  : GET(dw, rd_##name))

/* Write the same value to both the wr_* and rd_* instances. */
#define SET_BOTH(dw, name, value)			\
	do {						\
		SET(dw, wr_##name, value);		\
		SET(dw, rd_##name, value);		\
	} while (0)
56*4882a593Smuzhiyun static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma * dw,enum dw_edma_dir dir,u16 ch)57*4882a593Smuzhiyun __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	if (dw->mode == EDMA_MODE_LEGACY)
60*4882a593Smuzhiyun 		return &(__dw_regs(dw)->type.legacy.ch);
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	if (dir == EDMA_DIR_WRITE)
63*4882a593Smuzhiyun 		return &__dw_regs(dw)->type.unroll.ch[ch].wr;
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
/*
 * Write @value to a per-channel register.  In legacy mode the single
 * channel window must first be pointed at (@dir, @ch) via the
 * viewport_sel register, so the select + write pair is performed
 * atomically under dw->lock.  In unroll mode every channel has its own
 * registers and the write goes straight through.
 */
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);	/* BIT(31) selects the read side */

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}
90*4882a593Smuzhiyun 
/*
 * Read a per-channel register.  Mirrors writel_ch(): in legacy mode
 * the viewport must be switched to (@dir, @ch) and the select + read
 * pair kept atomic under dw->lock; in unroll mode the register is
 * read directly.
 */
static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);	/* BIT(31) selects the read side */

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}
117*4882a593Smuzhiyun 
/* Per-channel register accessors (viewport-safe in legacy mode). */
#define SET_CH(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

/* Write one 32-bit word of a linked-list element. */
#define SET_LL(ll, value) \
	writel(value, ll)
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun /* eDMA management callbacks */
/*
 * Quiesce the controller: mask and clear all done/abort interrupts on
 * both the write and read sides, then disable both engines.
 */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, engine_en, 0);
}
134*4882a593Smuzhiyun 
dw_edma_v0_core_ch_count(struct dw_edma * dw,enum dw_edma_dir dir)135*4882a593Smuzhiyun u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun 	u32 num_ch;
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	if (dir == EDMA_DIR_WRITE)
140*4882a593Smuzhiyun 		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
141*4882a593Smuzhiyun 	else
142*4882a593Smuzhiyun 		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	if (num_ch > EDMA_V0_MAX_NR_CH)
145*4882a593Smuzhiyun 		num_ch = EDMA_V0_MAX_NR_CH;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	return (u16)num_ch;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun 
dw_edma_v0_core_ch_status(struct dw_edma_chan * chan)150*4882a593Smuzhiyun enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	struct dw_edma *dw = chan->chip->dw;
153*4882a593Smuzhiyun 	u32 tmp;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
156*4882a593Smuzhiyun 			GET_CH(dw, chan->dir, chan->id, ch_control1));
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	if (tmp == 1)
159*4882a593Smuzhiyun 		return DMA_IN_PROGRESS;
160*4882a593Smuzhiyun 	else if (tmp == 3)
161*4882a593Smuzhiyun 		return DMA_COMPLETE;
162*4882a593Smuzhiyun 	else
163*4882a593Smuzhiyun 		return DMA_ERROR;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun 
/* Acknowledge (clear) the DONE interrupt for @chan in its direction. */
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
173*4882a593Smuzhiyun 
/* Acknowledge (clear) the ABORT interrupt for @chan in its direction. */
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
181*4882a593Smuzhiyun 
/* Pending DONE interrupt bits (one per channel) for direction @dir. */
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}
186*4882a593Smuzhiyun 
/* Pending ABORT interrupt bits (one per channel) for direction @dir. */
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}
191*4882a593Smuzhiyun 
/*
 * Fill @chunk's linked-list region with one data element per burst,
 * terminated by a link element that points back at the start of the
 * list.  The chunk's cycle bit (chunk->cb) is propagated into every
 * data element; the trailing link element carries the opposite cycle
 * bit value.
 */
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_v0_lli __iomem *lli;
	struct dw_edma_v0_llp __iomem *llp;
	u32 control = 0, i = 0;
	int j;

	lli = chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	/* Count down so only the last burst gets the interrupt bits. */
	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j)
			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);

		/* Channel control */
		SET_LL(&lli[i].control, control);
		/* Transfer size */
		SET_LL(&lli[i].transfer_size, child->sz);
		/* SAR - low, high */
		SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
		SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
		/* DAR - low, high */
		SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
		SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
		i++;
	}

	/* Link element follows the last data element. */
	llp = (void __iomem *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;	/* inverted cycle bit */

	/* Channel control */
	SET_LL(&llp->control, control);
	/* Linked list  - low, high */
	SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
	SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
235*4882a593Smuzhiyun 
/*
 * Hand @chunk to the hardware.  On the @first chunk of a transfer the
 * engine is enabled, the channel's done/abort interrupts are unmasked,
 * linked-list error reporting is enabled, the channel is put into
 * linked-list mode and pointed at the chunk's list.  Every call ends
 * by ringing the doorbell so the engine (re)starts fetching elements.
 */
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW(dw, chan->dir, engine_en, BIT(0));
		/* Interrupt unmask - done, abort */
		tmp = GET_RW(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH(dw, chan->dir, chan->id, ch_control1,
		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list - low, high */
		SET_CH(dw, chan->dir, chan->id, llp_low,
		       lower_32_bits(chunk->ll_region.paddr));
		SET_CH(dw, chan->dir, chan->id, llp_high,
		       upper_32_bits(chunk->ll_region.paddr));
	}
	/* Doorbell */
	SET_RW(dw, chan->dir, doorbell,
	       FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
269*4882a593Smuzhiyun 
/*
 * Program the channel's interrupt write (iMWr/MSI) configuration.
 *
 * The done and abort message addresses are taken from the channel's
 * MSI descriptor.  MSI data is packed two channels per 32-bit
 * chXY_imwr_data register — even channel in the low half, odd channel
 * in the high half — so only this channel's half is updated via a
 * read-modify-write of the shared register.
 *
 * Always returns 0.
 */
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
	/* MSI data - low, high */
	/* Read the shared data register this channel's half lives in. */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;	/* keep partner's half */
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;	/* keep partner's half */
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	/* Write back the updated shared register. */
	switch (chan->id) {
	case 0:
	case 1:
		SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun /* eDMA debugfs callbacks */
/* Register the v0 debugfs entries for @chip. */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_on(chip);
}
346*4882a593Smuzhiyun 
/* Tear down the v0 debugfs entries. */
void dw_edma_v0_core_debugfs_off(void)
{
	dw_edma_v0_debugfs_off();
}
351