// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)


struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *base;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 * Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_all(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock;
	 * call it from this non-atomic cleanup path.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	/* maybe suspended */
	if (dmaen->chan)
		return 0;

	/*
	 * DMAEngine request uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock,
	 * so request the channel here in prepare.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

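/*
 * Configure the requested DMAEngine channel for this stream and start a
 * cyclic transfer over the whole PCM buffer; the completion callback fires
 * at every period boundary (see rsnd_dmaen_complete()).
 */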
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	int is_play = rsnd_io_is_play(io);
	int ret;

	/*
	 * In the case of monaural data written or read through the Audio-DMAC,
	 * data is always in Left-Justified format, so both src and dst
	 * DMA bus widths need to be set equal to the physical data width.
	 */
	if (rsnd_runtime_channel_original(io) == 1) {
		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
		int bits = snd_pcm_format_physical_width(runtime->format);

		switch (bits) {
		case 8:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		case 16:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 32:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		default:
			dev_err(dev, "invalid format width %d\n", bits);
			return -EINVAL;
		}
	}

	cfg.direction		= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr		= dma->src_addr;
	cfg.dst_addr		= dma->dst_addr;
	cfg.src_addr_width	= buswidth;
	cfg.dst_addr_width	= buswidth;

	dev_dbg(dev, "%s %pad -> %pad\n",
		rsnd_mod_name(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

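/*
 * Request a DMAEngine channel by walking the children of the given DMAC
 * OF node and picking the child whose index matches this mod's raw id.
 */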
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id_raw(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* of_node_put() is needed here, since of_node came from rsnd_xxx_of_node() */
	of_node_put(of_node);

	return chan;
}

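/*
 * Probe DMAEngine channel availability at attach time: request the channel,
 * remember the DMAC device for IPMMU-aware buffer allocation, then release
 * the channel again; it is re-requested later in rsnd_dmaen_prepare().
 */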
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* propagate the -EPROBE_DEFER case */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. Fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

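/*
 * Derive the current hardware pointer from the DMAEngine residue: the number
 * of bytes already transferred is buffer length minus residue, converted to
 * frames for ALSA.
 */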
static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};

/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

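/*
 * Look up the Audio DMAC peri peri resource ID for a mod: SSI/SSIU entries
 * are indexed by (SSI id * 8 + BUSIF), SRC and CMD entries by mod id.
 * An unknown connection falls back to ID 0x00 (SSI00) after logging an error.
 */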
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg)					\
	(dmac->base + 0x20 + reg +					\
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

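/*
 * Clear PDMACHCR_DE to stop the peri peri channel, then poll up to ~1ms
 * (1024 x 1us) for the hardware to confirm that DE has dropped.
 */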
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name		= "audmac-pp",
	.start		= rsnd_dmapp_start,
	.stop		= rsnd_dmapp_stop,
	.quit		= rsnd_dmapp_stop,
	.get_status	= rsnd_mod_get_status,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000               0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)

#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

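/*
 * Pick the DMA source/destination address from the dma_addrs[][][] table
 * below. It is indexed by [is_ssi][is_play][use_src + use_cmd]:
 * is_ssi selects SRC(0)/SSI(1)/SSIU(2), and the last index reflects how many
 * processing blocks (SRC, CMD) sit on the path.
 */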
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),		0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,			RDMA_SSI_I_N(ssi, id) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,			RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

	/*
	 * FIXME
	 *
	 * We can't support SSI9-4/5/6/7, because their addresses are
	 * outside of the calculation rule.
	 */
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
			id, busif);

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * Gen1 uses the default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
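/*
 * Work out which two mods this DMA transfers between: lay the connected mods
 * out in processing order (memory and SSI at the ends, SRC/CTU/MIX/DVC in
 * between), then pick the pair at the SSI end when "this" is the SSI/SSIU
 * mod, or the pair at the memory end otherwise.
 */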
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/*
	 * DT should use "rcar_sound,ssiu", but we need to keep
	 * compatibility with old versions.
	 *
	 * If "rcar_sound,ssiu" exists, it will be used.
	 * If not, "rcar_sound,ssi" will be used.
	 * see
	 *	rsnd_ssiu_dma_req()
	 *	rsnd_ssi_dma_req()
	 */
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		/* use SSIU */
		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		/* keep compatible, use SSI */
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

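/*
 * Allocate and initialize a DMA mod for this stream: a peri peri (dmapp) mod
 * when the transfer runs between two on-chip mods, otherwise a DMAEngine
 * (dmaen) mod for transfers to/from memory. Gen1 always uses DMAEngine.
 */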
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA is unavailable. Fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s %s -> %s\n",
		rsnd_mod_name(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

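/*
 * Map the Audio DMAC peri peri ("audmapp") register block at probe time.
 * If the resource or the allocation is missing, probe still succeeds and
 * the streams will run in PIO mode instead.
 */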
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}