1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun //
3*4882a593Smuzhiyun // Freescale ASRC ALSA SoC Platform (DMA) driver
4*4882a593Smuzhiyun //
5*4882a593Smuzhiyun // Copyright (C) 2014 Freescale Semiconductor, Inc.
6*4882a593Smuzhiyun //
7*4882a593Smuzhiyun // Author: Nicolin Chen <nicoleotsuka@gmail.com>
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/dma-mapping.h>
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/platform_data/dma-imx.h>
12*4882a593Smuzhiyun #include <sound/dmaengine_pcm.h>
13*4882a593Smuzhiyun #include <sound/pcm_params.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include "fsl_asrc_common.h"
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #define FSL_ASRC_DMABUF_SIZE (256 * 1024)
18*4882a593Smuzhiyun
/*
 * Baseline PCM hardware capabilities advertised to userspace; further
 * refined at open() time by snd_dmaengine_pcm_refine_runtime_hwparams()
 * according to the actual DMA engine capabilities.
 */
static struct snd_pcm_hardware snd_imx_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID,
	.buffer_bytes_max = FSL_ASRC_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 65535, /* Limited by SDMA engine */
	.periods_min = 2,
	.periods_max = 255,
	.fifo_size = 0,
};
31*4882a593Smuzhiyun
filter(struct dma_chan * chan,void * param)32*4882a593Smuzhiyun static bool filter(struct dma_chan *chan, void *param)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun if (!imx_dma_is_general_purpose(chan))
35*4882a593Smuzhiyun return false;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun chan->private = param;
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun return true;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
fsl_asrc_dma_complete(void * arg)42*4882a593Smuzhiyun static void fsl_asrc_dma_complete(void *arg)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct snd_pcm_substream *substream = arg;
45*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
46*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun pair->pos += snd_pcm_lib_period_bytes(substream);
49*4882a593Smuzhiyun if (pair->pos >= snd_pcm_lib_buffer_bytes(substream))
50*4882a593Smuzhiyun pair->pos = 0;
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun snd_pcm_period_elapsed(substream);
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun
/*
 * Prepare and submit the cyclic descriptors for both halves of an ASRC
 * stream: the Front-End (memory <-> ASRC FIFO) and the Back-End
 * (ASRC FIFO <-> peripheral FIFO, DEV_TO_DEV).
 *
 * Returns 0 on success or -ENOMEM if a descriptor cannot be prepared.
 */
static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream,
					   struct snd_soc_component *component)
{
	u8 dir = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUT : IN;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct fsl_asrc_pair *pair = runtime->private_data;
	struct device *dev = component->dev;
	unsigned long flags = DMA_CTRL_ACK;

	/* Prepare and submit Front-End DMA channel */
	if (!substream->runtime->no_period_wakeup)
		flags |= DMA_PREP_INTERRUPT;

	/* Restart pointer tracking for this stream */
	pair->pos = 0;
	/* !dir: playback FE feeds the ASRC input (IN), capture the reverse */
	pair->desc[!dir] = dmaengine_prep_dma_cyclic(
			pair->dma_chan[!dir], runtime->dma_addr,
			snd_pcm_lib_buffer_bytes(substream),
			snd_pcm_lib_period_bytes(substream),
			dir == OUT ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, flags);
	if (!pair->desc[!dir]) {
		dev_err(dev, "failed to prepare slave DMA for Front-End\n");
		return -ENOMEM;
	}

	/* Period callback drives pair->pos and snd_pcm_period_elapsed() */
	pair->desc[!dir]->callback = fsl_asrc_dma_complete;
	pair->desc[!dir]->callback_param = substream;

	dmaengine_submit(pair->desc[!dir]);

	/*
	 * Prepare and submit Back-End DMA channel.
	 * DEV_TO_DEV transfer: the addresses were set via
	 * dmaengine_slave_config() in hw_params(), so the dma_addr argument
	 * (0xffff) and the 64-byte length/period here are placeholders.
	 */
	pair->desc[dir] = dmaengine_prep_dma_cyclic(
			pair->dma_chan[dir], 0xffff, 64, 64, DMA_DEV_TO_DEV, 0);
	if (!pair->desc[dir]) {
		dev_err(dev, "failed to prepare slave DMA for Back-End\n");
		return -ENOMEM;
	}

	dmaengine_submit(pair->desc[dir]);

	return 0;
}
96*4882a593Smuzhiyun
fsl_asrc_dma_trigger(struct snd_soc_component * component,struct snd_pcm_substream * substream,int cmd)97*4882a593Smuzhiyun static int fsl_asrc_dma_trigger(struct snd_soc_component *component,
98*4882a593Smuzhiyun struct snd_pcm_substream *substream, int cmd)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
101*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
102*4882a593Smuzhiyun int ret;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun switch (cmd) {
105*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_START:
106*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_RESUME:
107*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
108*4882a593Smuzhiyun ret = fsl_asrc_dma_prepare_and_submit(substream, component);
109*4882a593Smuzhiyun if (ret)
110*4882a593Smuzhiyun return ret;
111*4882a593Smuzhiyun dma_async_issue_pending(pair->dma_chan[IN]);
112*4882a593Smuzhiyun dma_async_issue_pending(pair->dma_chan[OUT]);
113*4882a593Smuzhiyun break;
114*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_STOP:
115*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_SUSPEND:
116*4882a593Smuzhiyun case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
117*4882a593Smuzhiyun dmaengine_terminate_all(pair->dma_chan[OUT]);
118*4882a593Smuzhiyun dmaengine_terminate_all(pair->dma_chan[IN]);
119*4882a593Smuzhiyun break;
120*4882a593Smuzhiyun default:
121*4882a593Smuzhiyun return -EINVAL;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun return 0;
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun
fsl_asrc_dma_hw_params(struct snd_soc_component * component,struct snd_pcm_substream * substream,struct snd_pcm_hw_params * params)127*4882a593Smuzhiyun static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
128*4882a593Smuzhiyun struct snd_pcm_substream *substream,
129*4882a593Smuzhiyun struct snd_pcm_hw_params *params)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
132*4882a593Smuzhiyun struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
133*4882a593Smuzhiyun bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
134*4882a593Smuzhiyun struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
135*4882a593Smuzhiyun struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
136*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
137*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
138*4882a593Smuzhiyun struct dma_chan *tmp_chan = NULL, *be_chan = NULL;
139*4882a593Smuzhiyun struct snd_soc_component *component_be = NULL;
140*4882a593Smuzhiyun struct fsl_asrc *asrc = pair->asrc;
141*4882a593Smuzhiyun struct dma_slave_config config_fe, config_be;
142*4882a593Smuzhiyun enum asrc_pair_index index = pair->index;
143*4882a593Smuzhiyun struct device *dev = component->dev;
144*4882a593Smuzhiyun int stream = substream->stream;
145*4882a593Smuzhiyun struct imx_dma_data *tmp_data;
146*4882a593Smuzhiyun struct snd_soc_dpcm *dpcm;
147*4882a593Smuzhiyun struct device *dev_be;
148*4882a593Smuzhiyun u8 dir = tx ? OUT : IN;
149*4882a593Smuzhiyun dma_cap_mask_t mask;
150*4882a593Smuzhiyun int ret, width;
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun /* Fetch the Back-End dma_data from DPCM */
153*4882a593Smuzhiyun for_each_dpcm_be(rtd, stream, dpcm) {
154*4882a593Smuzhiyun struct snd_soc_pcm_runtime *be = dpcm->be;
155*4882a593Smuzhiyun struct snd_pcm_substream *substream_be;
156*4882a593Smuzhiyun struct snd_soc_dai *dai = asoc_rtd_to_cpu(be, 0);
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun if (dpcm->fe != rtd)
159*4882a593Smuzhiyun continue;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun substream_be = snd_soc_dpcm_get_substream(be, stream);
162*4882a593Smuzhiyun dma_params_be = snd_soc_dai_get_dma_data(dai, substream_be);
163*4882a593Smuzhiyun dev_be = dai->dev;
164*4882a593Smuzhiyun break;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun if (!dma_params_be) {
168*4882a593Smuzhiyun dev_err(dev, "failed to get the substream of Back-End\n");
169*4882a593Smuzhiyun return -EINVAL;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun /* Override dma_data of the Front-End and config its dmaengine */
173*4882a593Smuzhiyun dma_params_fe = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
174*4882a593Smuzhiyun dma_params_fe->addr = asrc->paddr + asrc->get_fifo_addr(!dir, index);
175*4882a593Smuzhiyun dma_params_fe->maxburst = dma_params_be->maxburst;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun pair->dma_chan[!dir] = asrc->get_dma_channel(pair, !dir);
178*4882a593Smuzhiyun if (!pair->dma_chan[!dir]) {
179*4882a593Smuzhiyun dev_err(dev, "failed to request DMA channel\n");
180*4882a593Smuzhiyun return -EINVAL;
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun memset(&config_fe, 0, sizeof(config_fe));
184*4882a593Smuzhiyun ret = snd_dmaengine_pcm_prepare_slave_config(substream, params, &config_fe);
185*4882a593Smuzhiyun if (ret) {
186*4882a593Smuzhiyun dev_err(dev, "failed to prepare DMA config for Front-End\n");
187*4882a593Smuzhiyun return ret;
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun ret = dmaengine_slave_config(pair->dma_chan[!dir], &config_fe);
191*4882a593Smuzhiyun if (ret) {
192*4882a593Smuzhiyun dev_err(dev, "failed to config DMA channel for Front-End\n");
193*4882a593Smuzhiyun return ret;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /* Request and config DMA channel for Back-End */
197*4882a593Smuzhiyun dma_cap_zero(mask);
198*4882a593Smuzhiyun dma_cap_set(DMA_SLAVE, mask);
199*4882a593Smuzhiyun dma_cap_set(DMA_CYCLIC, mask);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun /*
202*4882a593Smuzhiyun * The Back-End device might have already requested a DMA channel,
203*4882a593Smuzhiyun * so try to reuse it first, and then request a new one upon NULL.
204*4882a593Smuzhiyun */
205*4882a593Smuzhiyun component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME);
206*4882a593Smuzhiyun if (component_be) {
207*4882a593Smuzhiyun be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
208*4882a593Smuzhiyun tmp_chan = be_chan;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun if (!tmp_chan)
211*4882a593Smuzhiyun tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /*
214*4882a593Smuzhiyun * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
215*4882a593Smuzhiyun * peripheral, unlike SDMA channel that is allocated dynamically. So no
216*4882a593Smuzhiyun * need to configure dma_request and dma_request2, but get dma_chan of
217*4882a593Smuzhiyun * Back-End device directly via dma_request_slave_channel.
218*4882a593Smuzhiyun */
219*4882a593Smuzhiyun if (!asrc->use_edma) {
220*4882a593Smuzhiyun /* Get DMA request of Back-End */
221*4882a593Smuzhiyun tmp_data = tmp_chan->private;
222*4882a593Smuzhiyun pair->dma_data.dma_request = tmp_data->dma_request;
223*4882a593Smuzhiyun if (!be_chan)
224*4882a593Smuzhiyun dma_release_channel(tmp_chan);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun /* Get DMA request of Front-End */
227*4882a593Smuzhiyun tmp_chan = asrc->get_dma_channel(pair, dir);
228*4882a593Smuzhiyun tmp_data = tmp_chan->private;
229*4882a593Smuzhiyun pair->dma_data.dma_request2 = tmp_data->dma_request;
230*4882a593Smuzhiyun pair->dma_data.peripheral_type = tmp_data->peripheral_type;
231*4882a593Smuzhiyun pair->dma_data.priority = tmp_data->priority;
232*4882a593Smuzhiyun dma_release_channel(tmp_chan);
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun pair->dma_chan[dir] =
235*4882a593Smuzhiyun dma_request_channel(mask, filter, &pair->dma_data);
236*4882a593Smuzhiyun pair->req_dma_chan = true;
237*4882a593Smuzhiyun } else {
238*4882a593Smuzhiyun pair->dma_chan[dir] = tmp_chan;
239*4882a593Smuzhiyun /* Do not flag to release if we are reusing the Back-End one */
240*4882a593Smuzhiyun pair->req_dma_chan = !be_chan;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun if (!pair->dma_chan[dir]) {
244*4882a593Smuzhiyun dev_err(dev, "failed to request DMA channel for Back-End\n");
245*4882a593Smuzhiyun return -EINVAL;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun width = snd_pcm_format_physical_width(asrc->asrc_format);
249*4882a593Smuzhiyun if (width < 8 || width > 64)
250*4882a593Smuzhiyun return -EINVAL;
251*4882a593Smuzhiyun else if (width == 8)
252*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
253*4882a593Smuzhiyun else if (width == 16)
254*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
255*4882a593Smuzhiyun else if (width == 24)
256*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
257*4882a593Smuzhiyun else if (width <= 32)
258*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
259*4882a593Smuzhiyun else
260*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun config_be.direction = DMA_DEV_TO_DEV;
263*4882a593Smuzhiyun config_be.src_addr_width = buswidth;
264*4882a593Smuzhiyun config_be.src_maxburst = dma_params_be->maxburst;
265*4882a593Smuzhiyun config_be.dst_addr_width = buswidth;
266*4882a593Smuzhiyun config_be.dst_maxburst = dma_params_be->maxburst;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun if (tx) {
269*4882a593Smuzhiyun config_be.src_addr = asrc->paddr + asrc->get_fifo_addr(OUT, index);
270*4882a593Smuzhiyun config_be.dst_addr = dma_params_be->addr;
271*4882a593Smuzhiyun } else {
272*4882a593Smuzhiyun config_be.dst_addr = asrc->paddr + asrc->get_fifo_addr(IN, index);
273*4882a593Smuzhiyun config_be.src_addr = dma_params_be->addr;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
277*4882a593Smuzhiyun if (ret) {
278*4882a593Smuzhiyun dev_err(dev, "failed to config DMA channel for Back-End\n");
279*4882a593Smuzhiyun if (pair->req_dma_chan)
280*4882a593Smuzhiyun dma_release_channel(pair->dma_chan[dir]);
281*4882a593Smuzhiyun return ret;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun return 0;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
fsl_asrc_dma_hw_free(struct snd_soc_component * component,struct snd_pcm_substream * substream)289*4882a593Smuzhiyun static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
290*4882a593Smuzhiyun struct snd_pcm_substream *substream)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
293*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
294*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
295*4882a593Smuzhiyun u8 dir = tx ? OUT : IN;
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun snd_pcm_set_runtime_buffer(substream, NULL);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun if (pair->dma_chan[!dir])
300*4882a593Smuzhiyun dma_release_channel(pair->dma_chan[!dir]);
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun /* release dev_to_dev chan if we aren't reusing the Back-End one */
303*4882a593Smuzhiyun if (pair->dma_chan[dir] && pair->req_dma_chan)
304*4882a593Smuzhiyun dma_release_channel(pair->dma_chan[dir]);
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun pair->dma_chan[!dir] = NULL;
307*4882a593Smuzhiyun pair->dma_chan[dir] = NULL;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun return 0;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun
/*
 * PCM open: allocate the runtime pair state, then use a temporary
 * ("dummy") ASRC pair and DMA channel solely to refine the hardware
 * constraints from the DMA engine's capabilities. Both dummies are
 * released before returning; the real pair/channels are requested in
 * hw_params(). On success the pair stays alive (release_pair = false)
 * and is freed in fsl_asrc_dma_shutdown(); on failure the goto chain
 * unwinds whatever was acquired and frees the pair.
 */
static int fsl_asrc_dma_startup(struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
{
	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct device *dev = component->dev;
	struct fsl_asrc *asrc = dev_get_drvdata(dev);
	struct fsl_asrc_pair *pair;
	struct dma_chan *tmp_chan = NULL;
	u8 dir = tx ? OUT : IN;
	bool release_pair = true;
	int ret = 0;

	/* Periods must be an integer count for the cyclic DMA setup */
	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		dev_err(dev, "failed to set pcm hw params periods\n");
		return ret;
	}

	/* Pair plus trailing per-variant private area in one allocation */
	pair = kzalloc(sizeof(*pair) + asrc->pair_priv_size, GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	pair->asrc = asrc;
	pair->private = (void *)pair + sizeof(struct fsl_asrc_pair);

	runtime->private_data = pair;

	/* Request a dummy pair, which will be released later.
	 * Request pair function needs channel num as input, for this
	 * dummy pair, we just request "1" channel temporarily.
	 */
	ret = asrc->request_pair(1, pair);
	if (ret < 0) {
		dev_err(dev, "failed to request asrc pair\n");
		goto req_pair_err;
	}

	/* Request a dummy dma channel, which will be released later. */
	tmp_chan = asrc->get_dma_channel(pair, dir);
	if (!tmp_chan) {
		dev_err(dev, "failed to get dma channel\n");
		ret = -EINVAL;
		goto dma_chan_err;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	/* Refine the snd_imx_hardware according to caps of DMA. */
	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
							dma_data,
							&snd_imx_hardware,
							tmp_chan);
	if (ret < 0) {
		dev_err(dev, "failed to refine runtime hwparams\n");
		goto out;
	}

	/* Success: keep the pair alive for the life of the substream */
	release_pair = false;
	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);

out:
	dma_release_channel(tmp_chan);

dma_chan_err:
	asrc->release_pair(pair);

req_pair_err:
	if (release_pair)
		kfree(pair);

	return ret;
}
388*4882a593Smuzhiyun
fsl_asrc_dma_shutdown(struct snd_soc_component * component,struct snd_pcm_substream * substream)389*4882a593Smuzhiyun static int fsl_asrc_dma_shutdown(struct snd_soc_component *component,
390*4882a593Smuzhiyun struct snd_pcm_substream *substream)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
393*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
394*4882a593Smuzhiyun struct fsl_asrc *asrc;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun if (!pair)
397*4882a593Smuzhiyun return 0;
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun asrc = pair->asrc;
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun if (asrc->pair[pair->index] == pair)
402*4882a593Smuzhiyun asrc->pair[pair->index] = NULL;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun kfree(pair);
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun return 0;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun static snd_pcm_uframes_t
fsl_asrc_dma_pcm_pointer(struct snd_soc_component * component,struct snd_pcm_substream * substream)410*4882a593Smuzhiyun fsl_asrc_dma_pcm_pointer(struct snd_soc_component *component,
411*4882a593Smuzhiyun struct snd_pcm_substream *substream)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
414*4882a593Smuzhiyun struct fsl_asrc_pair *pair = runtime->private_data;
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun return bytes_to_frames(substream->runtime, pair->pos);
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun
fsl_asrc_dma_pcm_new(struct snd_soc_component * component,struct snd_soc_pcm_runtime * rtd)419*4882a593Smuzhiyun static int fsl_asrc_dma_pcm_new(struct snd_soc_component *component,
420*4882a593Smuzhiyun struct snd_soc_pcm_runtime *rtd)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun struct snd_card *card = rtd->card->snd_card;
423*4882a593Smuzhiyun struct snd_pcm_substream *substream;
424*4882a593Smuzhiyun struct snd_pcm *pcm = rtd->pcm;
425*4882a593Smuzhiyun int ret, i;
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
428*4882a593Smuzhiyun if (ret) {
429*4882a593Smuzhiyun dev_err(card->dev, "failed to set DMA mask\n");
430*4882a593Smuzhiyun return ret;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun for_each_pcm_streams(i) {
434*4882a593Smuzhiyun substream = pcm->streams[i].substream;
435*4882a593Smuzhiyun if (!substream)
436*4882a593Smuzhiyun continue;
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
439*4882a593Smuzhiyun FSL_ASRC_DMABUF_SIZE, &substream->dma_buffer);
440*4882a593Smuzhiyun if (ret) {
441*4882a593Smuzhiyun dev_err(card->dev, "failed to allocate DMA buffer\n");
442*4882a593Smuzhiyun goto err;
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun return 0;
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun err:
449*4882a593Smuzhiyun if (--i == 0 && pcm->streams[i].substream)
450*4882a593Smuzhiyun snd_dma_free_pages(&pcm->streams[i].substream->dma_buffer);
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun return ret;
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun
fsl_asrc_dma_pcm_free(struct snd_soc_component * component,struct snd_pcm * pcm)455*4882a593Smuzhiyun static void fsl_asrc_dma_pcm_free(struct snd_soc_component *component,
456*4882a593Smuzhiyun struct snd_pcm *pcm)
457*4882a593Smuzhiyun {
458*4882a593Smuzhiyun struct snd_pcm_substream *substream;
459*4882a593Smuzhiyun int i;
460*4882a593Smuzhiyun
461*4882a593Smuzhiyun for_each_pcm_streams(i) {
462*4882a593Smuzhiyun substream = pcm->streams[i].substream;
463*4882a593Smuzhiyun if (!substream)
464*4882a593Smuzhiyun continue;
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun snd_dma_free_pages(&substream->dma_buffer);
467*4882a593Smuzhiyun substream->dma_buffer.area = NULL;
468*4882a593Smuzhiyun substream->dma_buffer.addr = 0;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun }
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun struct snd_soc_component_driver fsl_asrc_component = {
473*4882a593Smuzhiyun .name = DRV_NAME,
474*4882a593Smuzhiyun .hw_params = fsl_asrc_dma_hw_params,
475*4882a593Smuzhiyun .hw_free = fsl_asrc_dma_hw_free,
476*4882a593Smuzhiyun .trigger = fsl_asrc_dma_trigger,
477*4882a593Smuzhiyun .open = fsl_asrc_dma_startup,
478*4882a593Smuzhiyun .close = fsl_asrc_dma_shutdown,
479*4882a593Smuzhiyun .pointer = fsl_asrc_dma_pcm_pointer,
480*4882a593Smuzhiyun .pcm_construct = fsl_asrc_dma_pcm_new,
481*4882a593Smuzhiyun .pcm_destruct = fsl_asrc_dma_pcm_free,
482*4882a593Smuzhiyun };
483*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fsl_asrc_component);
484