// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
 *
 * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
#include "lpass-lpaif-reg.h"
#include "lpass.h"

#define LPASS_CPU_MAX_MI2S_LINES	4
#define LPASS_CPU_I2S_SD0_MASK		BIT(0)
#define LPASS_CPU_I2S_SD1_MASK		BIT(1)
#define LPASS_CPU_I2S_SD2_MASK		BIT(2)
#define LPASS_CPU_I2S_SD3_MASK		BIT(3)
#define LPASS_CPU_I2S_SD0_1_MASK	GENMASK(1, 0)
#define LPASS_CPU_I2S_SD2_3_MASK	GENMASK(3, 2)
#define LPASS_CPU_I2S_SD0_1_2_MASK	GENMASK(2, 0)
#define LPASS_CPU_I2S_SD0_1_2_3_MASK	GENMASK(3, 0)

static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
			struct lpaif_i2sctl *i2sctl, struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;

	i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
	i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
	i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
	i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
	i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
	i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
	i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
	i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
	i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);

	if (IS_ERR(i2sctl->loopback) || IS_ERR(i2sctl->spken) ||
	    IS_ERR(i2sctl->spkmode) || IS_ERR(i2sctl->spkmono) ||
	    IS_ERR(i2sctl->micen) || IS_ERR(i2sctl->micmode) ||
	    IS_ERR(i2sctl->micmono) || IS_ERR(i2sctl->wssrc) ||
	    IS_ERR(i2sctl->bitwidth))
		return -EINVAL;

	return 0;
}

static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
		unsigned int freq, int dir)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	int ret;

	ret = clk_set_rate(drvdata->mi2s_osr_clk[dai->driver->id], freq);
	if (ret)
		dev_err(dai->dev, "error setting mi2s osrclk to %u: %d\n",
			freq, ret);

	return ret;
}

static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	int ret;

	ret = clk_prepare_enable(drvdata->mi2s_osr_clk[dai->driver->id]);
	if (ret) {
		dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret);
		return ret;
	}
	ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]);
	if (ret) {
		dev_err(dai->dev, "error in preparing mi2s bit clk: %d\n", ret);
		clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
		return ret;
	}
	return 0;
}

static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;

	clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
	/*
	 * Ensure the LRCLK is disabled even during device node validation.
	 * This has no effect if it was already disabled in the
	 * lpass_cpu_daiops_trigger() suspend path.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
	else
		regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);

	/*
	 * The BCLK may not have been enabled, since lpass_cpu_daiops_prepare()
	 * is not always called before lpass_cpu_daiops_shutdown(). This
	 * clk_disable() is paired with the clk_enable() in
	 * lpass_cpu_daiops_prepare().
	 */
	if (drvdata->mi2s_was_prepared[dai->driver->id]) {
		drvdata->mi2s_was_prepared[dai->driver->id] = false;
		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
	}

	clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
}

static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	unsigned int mode;
	unsigned int regval;
	int bitwidth, ret;

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(dai->dev, "invalid bit width given: %d\n", bitwidth);
		return bitwidth;
	}

	ret = regmap_fields_write(i2sctl->loopback, id,
				 LPAIF_I2SCTL_LOOPBACK_DISABLE);
	if (ret) {
		dev_err(dai->dev, "error updating loopback field: %d\n", ret);
		return ret;
	}

	ret = regmap_fields_write(i2sctl->wssrc, id,
				 LPAIF_I2SCTL_WSSRC_INTERNAL);
	if (ret) {
		dev_err(dai->dev, "error updating wssrc field: %d\n", ret);
		return ret;
	}

	switch (bitwidth) {
	case 16:
		regval = LPAIF_I2SCTL_BITWIDTH_16;
		break;
	case 24:
		regval = LPAIF_I2SCTL_BITWIDTH_24;
		break;
	case 32:
		regval = LPAIF_I2SCTL_BITWIDTH_32;
		break;
	default:
		dev_err(dai->dev, "invalid bitwidth given: %d\n", bitwidth);
		return -EINVAL;
	}

	ret = regmap_fields_write(i2sctl->bitwidth, id, regval);
	if (ret) {
		dev_err(dai->dev, "error updating bitwidth field: %d\n", ret);
		return ret;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		mode = drvdata->mi2s_playback_sd_mode[id];
	else
		mode = drvdata->mi2s_capture_sd_mode[id];

	if (!mode) {
		dev_err(dai->dev, "no line is assigned\n");
		return -EINVAL;
	}

	switch (channels) {
	case 1:
	case 2:
		switch (mode) {
		case LPAIF_I2SCTL_MODE_QUAD01:
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_SD0;
			break;
		case LPAIF_I2SCTL_MODE_QUAD23:
			mode = LPAIF_I2SCTL_MODE_SD2;
			break;
		}

		break;
	case 4:
		if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
			dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
				mode);
			return -EINVAL;
		}

		switch (mode) {
		case LPAIF_I2SCTL_MODE_6CH:
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_QUAD01;
			break;
		}
		break;
	case 6:
		if (mode < LPAIF_I2SCTL_MODE_6CH) {
			dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
				mode);
			return -EINVAL;
		}

		switch (mode) {
		case LPAIF_I2SCTL_MODE_8CH:
			mode = LPAIF_I2SCTL_MODE_6CH;
			break;
		}
		break;
	case 8:
		if (mode < LPAIF_I2SCTL_MODE_8CH) {
			dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
				mode);
			return -EINVAL;
		}
		break;
	default:
		dev_err(dai->dev, "invalid channels given: %u\n", channels);
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = regmap_fields_write(i2sctl->spkmode, id,
					 LPAIF_I2SCTL_SPKMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl spkr mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->spkmono, id,
						 LPAIF_I2SCTL_SPKMONO_MONO);
	} else {
		ret = regmap_fields_write(i2sctl->micmode, id,
					 LPAIF_I2SCTL_MICMODE(mode));
		if (ret) {
			dev_err(dai->dev, "error writing to i2sctl mic mode: %d\n",
				ret);
			return ret;
		}
		if (channels >= 2)
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_STEREO);
		else
			ret = regmap_fields_write(i2sctl->micmono, id,
						 LPAIF_I2SCTL_MICMONO_MONO);
	}

	if (ret) {
		dev_err(dai->dev, "error writing to i2sctl channels mode: %d\n",
			ret);
		return ret;
	}

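	/*
	 * Note: the MI2S bit clock is driven at rate * bitwidth * 2, i.e. one
	 * bit-width worth of data for each of the two channels in an I2S frame.
	 */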
	ret = clk_set_rate(drvdata->mi2s_bit_clk[id],
			   rate * bitwidth * 2);
	if (ret) {
		dev_err(dai->dev, "error setting mi2s bitclk to %u: %d\n",
			rate * bitwidth * 2, ret);
		return ret;
	}

	return 0;
}

static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
		int cmd, struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	int ret = -EINVAL;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/*
		 * Ensure the LPASS BCLK/LRCLK is enabled during device resume,
		 * as lpass_cpu_daiops_prepare() is not called after the device
		 * resumes. We don't check mi2s_was_prepared before enabling or
		 * disabling the BCLK in trigger events because:
		 * 1. These trigger events are paired, so the BCLK
		 *    enable_count stays balanced.
		 * 2. The BCLK can be shared (e.g. headset and headset mic),
		 *    so we need to increase the enable_count to avoid turning
		 *    off the shared BCLK while other devices are using it.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_ENABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_ENABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
		if (ret) {
			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
			clk_disable(drvdata->mi2s_osr_clk[id]);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/*
		 * Ensure the LPASS BCLK/LRCLK is disabled during device
		 * suspend.
		 */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			ret = regmap_fields_write(i2sctl->spken, id,
						 LPAIF_I2SCTL_SPKEN_DISABLE);
		} else {
			ret = regmap_fields_write(i2sctl->micen, id,
						 LPAIF_I2SCTL_MICEN_DISABLE);
		}
		if (ret)
			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
				ret);

		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);

		break;
	}

	return ret;
}

static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
	unsigned int id = dai->driver->id;
	int ret;

	/*
	 * Ensure the LPASS BCLK/LRCLK is enabled before the playback/capture
	 * data flow starts. This gives the codec some lead time before the
	 * data flows (e.g. to drop the start-up pop noise before capture
	 * starts).
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
	else
		ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);

	if (ret) {
		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
		return ret;
	}

	/*
	 * Check mi2s_was_prepared before enabling the BCLK, as
	 * lpass_cpu_daiops_prepare() can be called multiple times. This
	 * clk_enable() is paired with the clk_disable() in
	 * lpass_cpu_daiops_shutdown().
	 */
	if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
		if (ret) {
			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
			return ret;
		}
		drvdata->mi2s_was_prepared[dai->driver->id] = true;
	}
	return 0;
}

const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
	.startup	= lpass_cpu_daiops_startup,
	.shutdown	= lpass_cpu_daiops_shutdown,
	.hw_params	= lpass_cpu_daiops_hw_params,
	.trigger	= lpass_cpu_daiops_trigger,
	.prepare	= lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);

int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
{
	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
	int ret;

	/* ensure audio hardware is disabled */
	ret = regmap_write(drvdata->lpaif_map,
			LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), 0);
	if (ret)
		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);

static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
				       struct of_phandle_args *args,
				       const char **dai_name)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpass_variant *variant = drvdata->variant;
	int id = args->args[0];
	int ret = -EINVAL;
	int i;

	for (i = 0; i < variant->num_dai; i++) {
		if (variant->dai_driver[i].id == id) {
			*dai_name = variant->dai_driver[i].name;
			ret = 0;
			break;
		}
	}

	return ret;
}

static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
	.name = "lpass-cpu",
	.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
};

static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	for (i = 0; i < v->i2s_ports; ++i)
		if (reg == LPAIF_I2SCTL_REG(v, i))
			return true;

	for (i = 0; i < v->irq_ports; ++i) {
		if (reg == LPAIF_IRQEN_REG(v, i))
			return true;
		if (reg == LPAIF_IRQCLEAR_REG(v, i))
			return true;
	}

	for (i = 0; i < v->rdma_channels; ++i) {
		if (reg == LPAIF_RDMACTL_REG(v, i))
			return true;
		if (reg == LPAIF_RDMABASE_REG(v, i))
			return true;
		if (reg == LPAIF_RDMABUFF_REG(v, i))
			return true;
		if (reg == LPAIF_RDMAPER_REG(v, i))
			return true;
	}

	for (i = 0; i < v->wrdma_channels; ++i) {
		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
			return true;
	}

	return false;
}

static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	for (i = 0; i < v->i2s_ports; ++i)
		if (reg == LPAIF_I2SCTL_REG(v, i))
			return true;

	for (i = 0; i < v->irq_ports; ++i) {
		if (reg == LPAIF_IRQEN_REG(v, i))
			return true;
		if (reg == LPAIF_IRQSTAT_REG(v, i))
			return true;
	}

	for (i = 0; i < v->rdma_channels; ++i) {
		if (reg == LPAIF_RDMACTL_REG(v, i))
			return true;
		if (reg == LPAIF_RDMABASE_REG(v, i))
			return true;
		if (reg == LPAIF_RDMABUFF_REG(v, i))
			return true;
		if (reg == LPAIF_RDMACURR_REG(v, i))
			return true;
		if (reg == LPAIF_RDMAPER_REG(v, i))
			return true;
	}

	for (i = 0; i < v->wrdma_channels; ++i) {
		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
			return true;
		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
			return true;
	}

	return false;
}

static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	for (i = 0; i < v->irq_ports; ++i)
		if (reg == LPAIF_IRQSTAT_REG(v, i))
			return true;

	for (i = 0; i < v->rdma_channels; ++i)
		if (reg == LPAIF_RDMACURR_REG(v, i))
			return true;

	for (i = 0; i < v->wrdma_channels; ++i)
		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
			return true;

	return false;
}

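/*
 * Deliberately not const: max_register is filled in at probe time from the
 * variant's WRDMA register layout (see asoc_qcom_lpass_cpu_platform_probe()).
 */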
static struct regmap_config lpass_cpu_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_cpu_regmap_writeable,
	.readable_reg = lpass_cpu_regmap_readable,
	.volatile_reg = lpass_cpu_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};

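/*
 * Allocate regmap fields for the HDMI TX control, legacy-enable, VBIT,
 * parity, DP metadata, sstream and per-channel DMA control bits described
 * by the variant.
 */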
static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	unsigned int i;
	struct lpass_hdmi_tx_ctl *tx_ctl;
	struct regmap_field *legacy_en;
	struct lpass_vbit_ctrl *vbit_ctl;
	struct regmap_field *tx_parity;
	struct lpass_dp_metadata_ctl *meta_ctl;
	struct lpass_sstream_ctl *sstream_ctl;
	struct regmap_field *ch_msb;
	struct regmap_field *ch_lsb;
	struct lpass_hdmitx_dmactl *tx_dmactl;
	int rval;

	tx_ctl = devm_kzalloc(dev, sizeof(*tx_ctl), GFP_KERNEL);
	if (!tx_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
	drvdata->tx_ctl = tx_ctl;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
	drvdata->hdmitx_legacy_en = legacy_en;

	vbit_ctl = devm_kzalloc(dev, sizeof(*vbit_ctl), GFP_KERNEL);
	if (!vbit_ctl)
		return -ENOMEM;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
	drvdata->vbit_ctl = vbit_ctl;

	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
	drvdata->hdmitx_parity_calc_en = tx_parity;

	meta_ctl = devm_kzalloc(dev, sizeof(*meta_ctl), GFP_KERNEL);
	if (!meta_ctl)
		return -ENOMEM;

	rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
	if (rval)
		return rval;
	drvdata->meta_ctl = meta_ctl;

	sstream_ctl = devm_kzalloc(dev, sizeof(*sstream_ctl), GFP_KERNEL);
	if (!sstream_ctl)
		return -ENOMEM;

	rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
	if (rval)
		return rval;
	drvdata->sstream_ctl = sstream_ctl;

	for (i = 0; i < LPASS_MAX_HDMI_DMA_CHANNELS; i++) {
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
		drvdata->hdmitx_ch_msb[i] = ch_msb;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
		drvdata->hdmitx_ch_lsb[i] = ch_lsb;

		tx_dmactl = devm_kzalloc(dev, sizeof(*tx_dmactl), GFP_KERNEL);
		if (!tx_dmactl)
			return -ENOMEM;

		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
		drvdata->hdmi_tx_dmactl[i] = tx_dmactl;
	}
	return 0;
}

static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
		return true;
	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
		return true;
	if (reg == LPASS_HDMITX_APP_IRQCLEAR_REG(v))
		return true;

	for (i = 0; i < v->hdmi_rdma_channels; i++) {
		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
			return true;
	}

	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
			return true;
	}
	return false;
}

static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
		return true;

	for (i = 0; i < v->hdmi_rdma_channels; i++) {
		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
			return true;
	}

	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
		return true;
	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
		return true;
	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
		return true;

	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
			return true;
		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
			return true;
	}

	return false;
}

static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	int i;

	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
		return true;
	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
		return true;
	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
		return true;

	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
			return true;
		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
			return true;
	}
	return false;
}

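/*
 * Also not const: max_register is set at probe time from the variant's
 * hdmi_rdma_channels count before the HDMI regmap is created.
 */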
struct regmap_config lpass_hdmi_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.writeable_reg = lpass_hdmi_regmap_writeable,
	.readable_reg = lpass_hdmi_regmap_readable,
	.volatile_reg = lpass_hdmi_regmap_volatile,
	.cache_type = REGCACHE_FLAT,
};

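/*
 * Translate a "qcom,playback-sd-lines" / "qcom,capture-sd-lines" DT property
 * into the matching I2SCTL SD-line mode, or LPAIF_I2SCTL_MODE_NONE when the
 * property is absent or the line combination is not supported.
 */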
static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
						struct device_node *node,
						const char *name)
{
	unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
	unsigned int sd_line_mask = 0;
	int num_lines, i;

	num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
							LPASS_CPU_MAX_MI2S_LINES);
	if (num_lines < 0)
		return LPAIF_I2SCTL_MODE_NONE;

	for (i = 0; i < num_lines; i++)
		sd_line_mask |= BIT(lines[i]);

	switch (sd_line_mask) {
	case LPASS_CPU_I2S_SD0_MASK:
		return LPAIF_I2SCTL_MODE_SD0;
	case LPASS_CPU_I2S_SD1_MASK:
		return LPAIF_I2SCTL_MODE_SD1;
	case LPASS_CPU_I2S_SD2_MASK:
		return LPAIF_I2SCTL_MODE_SD2;
	case LPASS_CPU_I2S_SD3_MASK:
		return LPAIF_I2SCTL_MODE_SD3;
	case LPASS_CPU_I2S_SD0_1_MASK:
		return LPAIF_I2SCTL_MODE_QUAD01;
	case LPASS_CPU_I2S_SD2_3_MASK:
		return LPAIF_I2SCTL_MODE_QUAD23;
	case LPASS_CPU_I2S_SD0_1_2_MASK:
		return LPAIF_I2SCTL_MODE_6CH;
	case LPASS_CPU_I2S_SD0_1_2_3_MASK:
		return LPAIF_I2SCTL_MODE_8CH;
	default:
		dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
		return LPAIF_I2SCTL_MODE_NONE;
	}
}

static void of_lpass_cpu_parse_dai_data(struct device *dev,
					struct lpass_data *data)
{
	struct device_node *node;
	int ret, id;

	/* Allow all channels by default for backwards compatibility */
	for (id = 0; id < data->variant->num_dai; id++) {
		data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
		data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
	}

	for_each_child_of_node(dev->of_node, node) {
		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id < 0) {
			dev_err(dev, "valid dai id not found: %d\n", ret);
			continue;
		}
		if (id == LPASS_DP_RX) {
			data->hdmi_port_enable = 1;
		} else {
			data->mi2s_playback_sd_mode[id] =
				of_lpass_cpu_parse_sd_lines(dev, node,
							    "qcom,playback-sd-lines");
			data->mi2s_capture_sd_mode[id] =
				of_lpass_cpu_parse_sd_lines(dev, node,
							    "qcom,capture-sd-lines");
		}
	}
}

int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
{
	struct lpass_data *drvdata;
	struct device_node *dsp_of_node;
	struct resource *res;
	struct lpass_variant *variant;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret, i, dai_id;

	dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
	if (dsp_of_node) {
		dev_err(dev, "DSP exists and holds audio resources\n");
		of_node_put(dsp_of_node);
		return -EBUSY;
	}

	drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	platform_set_drvdata(pdev, drvdata);

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match || !match->data)
		return -EINVAL;

	drvdata->variant = (struct lpass_variant *)match->data;
	variant = drvdata->variant;

	of_lpass_cpu_parse_dai_data(dev, drvdata);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-lpaif");

	drvdata->lpaif = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const __force *)drvdata->lpaif)) {
		dev_err(dev, "error mapping reg resource: %ld\n",
			PTR_ERR((void const __force *)drvdata->lpaif));
		return PTR_ERR((void const __force *)drvdata->lpaif);
	}

	lpass_cpu_regmap_config.max_register = LPAIF_WRDMAPER_REG(variant,
						variant->wrdma_channels +
						variant->wrdma_channel_start);

	drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
			&lpass_cpu_regmap_config);
	if (IS_ERR(drvdata->lpaif_map)) {
		dev_err(dev, "error initializing regmap: %ld\n",
			PTR_ERR(drvdata->lpaif_map));
		return PTR_ERR(drvdata->lpaif_map);
	}

	if (drvdata->hdmi_port_enable) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-hdmiif");

		drvdata->hdmiif = devm_ioremap_resource(dev, res);
		if (IS_ERR((void const __force *)drvdata->hdmiif)) {
			dev_err(dev, "error mapping reg resource: %ld\n",
				PTR_ERR((void const __force *)drvdata->hdmiif));
			return PTR_ERR((void const __force *)drvdata->hdmiif);
		}

		lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
					variant->hdmi_rdma_channels - 1);
		drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
					&lpass_hdmi_regmap_config);
		if (IS_ERR(drvdata->hdmiif_map)) {
			dev_err(dev, "error initializing regmap: %ld\n",
				PTR_ERR(drvdata->hdmiif_map));
			return PTR_ERR(drvdata->hdmiif_map);
		}
	}

	if (variant->init) {
		ret = variant->init(pdev);
		if (ret) {
			dev_err(dev, "error initializing variant: %d\n", ret);
			return ret;
		}
	}

	for (i = 0; i < variant->num_dai; i++) {
		dai_id = variant->dai_driver[i].id;
		if (dai_id == LPASS_DP_RX)
			continue;

		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
					     variant->dai_osr_clk_names[i]);
		drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
						variant->dai_bit_clk_names[i]);
		if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
			dev_err(dev,
				"error getting %s: %ld\n",
				variant->dai_bit_clk_names[i],
				PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
			return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
		}
	}

	/* Allocation for i2sctl regmap fields */
	drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
					GFP_KERNEL);
	if (!drvdata->i2sctl)
		return -ENOMEM;

	/* Initialize bitfields for dai I2SCTL register */
	ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
						drvdata->lpaif_map);
	if (ret) {
		dev_err(dev, "error init i2sctl field: %d\n", ret);
		return ret;
	}

	if (drvdata->hdmi_port_enable) {
		ret = lpass_hdmi_init_bitfields(dev, drvdata->hdmiif_map);
		if (ret) {
			dev_err(dev, "%s error hdmi init failed\n", __func__);
			return ret;
		}
	}
	ret = devm_snd_soc_register_component(dev,
					      &lpass_cpu_comp_driver,
					      variant->dai_driver,
					      variant->num_dai);
	if (ret) {
		dev_err(dev, "error registering cpu driver: %d\n", ret);
		goto err;
	}

	ret = asoc_qcom_lpass_platform_register(pdev);
	if (ret) {
		dev_err(dev, "error registering platform driver: %d\n", ret);
		goto err;
	}

err:
	return ret;
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);

int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
{
	struct lpass_data *drvdata = platform_get_drvdata(pdev);

	if (drvdata->variant->exit)
		drvdata->variant->exit(pdev);

	return 0;
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);

MODULE_DESCRIPTION("QTi LPASS CPU Driver");
MODULE_LICENSE("GPL v2");