// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

/*
 * Soundwire Intel Master Driver
 */

#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"

#define INTEL_MASTER_SUSPEND_DELAY_MS	3000

/*
 * debug/config flags for the Intel SoundWire Master.
 *
 * Since multiple masters may be active, the same set of 8 flags is
 * reused in each byte, with Master 0 using the least-significant byte,
 * Master 1 the next byte, etc.
 */

#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME		BIT(0)
#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP		BIT(1)
#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE	BIT(2)
#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK		BIT(3)

static int md_flags;
module_param_named(sdw_md_flags, md_flags, int, 0444);
MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
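
/*
 * For example, sdw_md_flags=0x1 would disable pm_runtime for Master 0 only,
 * while 0x101 would disable it for both Master 0 and Master 1, each Master
 * decoding its own byte of the flags as described above.
 */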

/* Intel SHIM Registers Definition */
#define SDW_SHIM_LCAP			0x0
#define SDW_SHIM_LCTL			0x4
#define SDW_SHIM_IPPTR			0x8
#define SDW_SHIM_SYNC			0xC

#define SDW_SHIM_CTLSCAP(x)		(0x010 + 0x60 * (x))
#define SDW_SHIM_CTLS0CM(x)		(0x012 + 0x60 * (x))
#define SDW_SHIM_CTLS1CM(x)		(0x014 + 0x60 * (x))
#define SDW_SHIM_CTLS2CM(x)		(0x016 + 0x60 * (x))
#define SDW_SHIM_CTLS3CM(x)		(0x018 + 0x60 * (x))
#define SDW_SHIM_PCMSCAP(x)		(0x020 + 0x60 * (x))

#define SDW_SHIM_PCMSYCHM(x, y)		(0x022 + (0x60 * (x)) + (0x2 * (y)))
#define SDW_SHIM_PCMSYCHC(x, y)		(0x042 + (0x60 * (x)) + (0x2 * (y)))
#define SDW_SHIM_PDMSCAP(x)		(0x062 + 0x60 * (x))
#define SDW_SHIM_IOCTL(x)		(0x06C + 0x60 * (x))
#define SDW_SHIM_CTMCTL(x)		(0x06E + 0x60 * (x))

#define SDW_SHIM_WAKEEN			0x190
#define SDW_SHIM_WAKESTS		0x192

#define SDW_SHIM_LCTL_SPA		BIT(0)
#define SDW_SHIM_LCTL_SPA_MASK		GENMASK(3, 0)
#define SDW_SHIM_LCTL_CPA		BIT(8)
#define SDW_SHIM_LCTL_CPA_MASK		GENMASK(11, 8)

#define SDW_SHIM_SYNC_SYNCPRD_VAL_24	(24000 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4	(38400 / SDW_CADENCE_GSYNC_KHZ - 1)
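/*
 * SYNCPRD counts mclk cycles per sync period, minus one: assuming the
 * typical 4 kHz sync counter (SDW_CADENCE_GSYNC_KHZ = 4), a 38.4 MHz mclk
 * gives 38400/4 - 1 = 9599.
 */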
#define SDW_SHIM_SYNC_SYNCPRD		GENMASK(14, 0)
#define SDW_SHIM_SYNC_SYNCCPU		BIT(15)
#define SDW_SHIM_SYNC_CMDSYNC_MASK	GENMASK(19, 16)
#define SDW_SHIM_SYNC_CMDSYNC		BIT(16)
#define SDW_SHIM_SYNC_SYNCGO		BIT(24)

#define SDW_SHIM_PCMSCAP_ISS		GENMASK(3, 0)
#define SDW_SHIM_PCMSCAP_OSS		GENMASK(7, 4)
#define SDW_SHIM_PCMSCAP_BSS		GENMASK(12, 8)

#define SDW_SHIM_PCMSYCM_LCHN		GENMASK(3, 0)
#define SDW_SHIM_PCMSYCM_HCHN		GENMASK(7, 4)
#define SDW_SHIM_PCMSYCM_STREAM		GENMASK(13, 8)
#define SDW_SHIM_PCMSYCM_DIR		BIT(15)

#define SDW_SHIM_PDMSCAP_ISS		GENMASK(3, 0)
#define SDW_SHIM_PDMSCAP_OSS		GENMASK(7, 4)
#define SDW_SHIM_PDMSCAP_BSS		GENMASK(12, 8)
#define SDW_SHIM_PDMSCAP_CPSS		GENMASK(15, 13)

#define SDW_SHIM_IOCTL_MIF		BIT(0)
#define SDW_SHIM_IOCTL_CO		BIT(1)
#define SDW_SHIM_IOCTL_COE		BIT(2)
#define SDW_SHIM_IOCTL_DO		BIT(3)
#define SDW_SHIM_IOCTL_DOE		BIT(4)
#define SDW_SHIM_IOCTL_BKE		BIT(5)
#define SDW_SHIM_IOCTL_WPDD		BIT(6)
#define SDW_SHIM_IOCTL_CIBD		BIT(8)
#define SDW_SHIM_IOCTL_DIBD		BIT(9)

#define SDW_SHIM_CTMCTL_DACTQE		BIT(0)
#define SDW_SHIM_CTMCTL_DODS		BIT(1)
#define SDW_SHIM_CTMCTL_DOAIS		GENMASK(4, 3)

#define SDW_SHIM_WAKEEN_ENABLE		BIT(0)
#define SDW_SHIM_WAKESTS_STATUS		BIT(0)

/* Intel ALH Register definitions */
#define SDW_ALH_STRMZCFG(x)		(0x000 + (0x4 * (x)))
#define SDW_ALH_NUM_STREAMS		64

#define SDW_ALH_STRMZCFG_DMAT_VAL	0x3
#define SDW_ALH_STRMZCFG_DMAT		GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN		GENMASK(19, 16)

enum intel_pdi_type {
	INTEL_PDI_IN = 0,
	INTEL_PDI_OUT = 1,
	INTEL_PDI_BD = 2,
};

#define cdns_to_intel(_cdns)	container_of(_cdns, struct sdw_intel, cdns)

/*
 * Read, write helpers for HW registers
 */
static inline int intel_readl(void __iomem *base, int offset)
{
	return readl(base + offset);
}

static inline void intel_writel(void __iomem *base, int offset, int value)
{
	writel(value, base + offset);
}

static inline u16 intel_readw(void __iomem *base, int offset)
{
	return readw(base + offset);
}

static inline void intel_writew(void __iomem *base, int offset, u16 value)
{
	writew(value, base + offset);
}

static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
	int timeout = 10;
	u32 reg_read;

	do {
		reg_read = readl(base + offset);
		if ((reg_read & mask) == target)
			return 0;

		timeout--;
		usleep_range(50, 100);
	} while (timeout != 0);

	return -EAGAIN;
}

static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, 0);
}

static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
	writel(value, base + offset);
	return intel_wait_bit(base, offset, mask, mask);
}

/*
 * debugfs
 */
#ifdef CONFIG_DEBUG_FS

#define RD_BUF (2 * PAGE_SIZE)

static ssize_t intel_sprintf(void __iomem *mem, bool l,
			     char *buf, size_t pos, unsigned int reg)
{
	int value;

	if (l)
		value = intel_readl(mem, reg);
	else
		value = intel_readw(mem, reg);

	return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
}

static int intel_reg_show(struct seq_file *s_file, void *data)
{
	struct sdw_intel *sdw = s_file->private;
	void __iomem *s = sdw->link_res->shim;
	void __iomem *a = sdw->link_res->alh;
	char *buf;
	ssize_t ret;
	int i, j;
	unsigned int links, reg;

	buf = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);

	ret = scnprintf(buf, RD_BUF, "Register Value\n");
	ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");

	for (i = 0; i < links; i++) {
		reg = SDW_SHIM_LCAP + i * 4;
		ret += intel_sprintf(s, true, buf, ret, reg);
	}

	for (i = 0; i < links; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));

		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");

		/*
		 * the value 10 is the number of PDIs. We will need a
		 * cleanup to remove hard-coded Intel configurations
		 * from cadence_master.c
		 */
		for (j = 0; j < 10; j++) {
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHM(i, j));
			ret += intel_sprintf(s, false, buf, ret,
					SDW_SHIM_PCMSYCHC(i, j));
		}
		ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");

		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
		ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
	}

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
	ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
	for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
		ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));

	seq_printf(s_file, "%s", buf);
	kfree(buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_reg);

static int intel_set_m_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.m_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
			 intel_set_m_datamode, "%llu\n");

static int intel_set_s_datamode(void *data, u64 value)
{
	struct sdw_intel *sdw = data;
	struct sdw_bus *bus = &sdw->cdns.bus;

	if (value > SDW_PORT_DATA_MODE_STATIC_1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	bus->params.s_data_mode = value;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
			 intel_set_s_datamode, "%llu\n");

static void intel_debugfs_init(struct sdw_intel *sdw)
{
	struct dentry *root = sdw->cdns.bus.debugfs;

	if (!root)
		return;

	sdw->debugfs = debugfs_create_dir("intel-sdw", root);

	debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
			    &intel_reg_fops);

	debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_m_datamode_fops);

	debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
			    &intel_set_s_datamode_fops);

	sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
}

static void intel_debugfs_exit(struct sdw_intel *sdw)
{
	debugfs_remove_recursive(sdw->debugfs);
}
#else
static void intel_debugfs_init(struct sdw_intel *sdw) {}
static void intel_debugfs_exit(struct sdw_intel *sdw) {}
#endif /* CONFIG_DEBUG_FS */

/*
 * shim ops
 */

static int intel_link_power_up(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	struct sdw_bus *bus = &sdw->cdns.bus;
	struct sdw_master_prop *prop = &bus->prop;
	u32 spa_mask, cpa_mask;
	u32 link_control;
	int ret = 0;
	u32 syncprd;
	u32 sync_reg;

	mutex_lock(sdw->link_res->shim_lock);

	/*
	 * The hardware relies on an internal counter, typically 4 kHz,
	 * to generate the SoundWire SSP - which defines a 'safe'
	 * synchronization point between commands and audio transport
	 * and allows for multi-link synchronization. The SYNCPRD value
	 * only depends on the oscillator clock provided to the IP, so
	 * adjust it based on the _DSD properties reported in the DSDT
	 * tables. The values reported are based on either 24 MHz
	 * (CNL/CML) or 38.4 MHz (ICL/TGL+).
	 */
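	/*
	 * 24 MHz is an exact multiple of 6 MHz while 38.4 MHz is not, so
	 * the test below selects the SYNCPRD value matching the oscillator.
	 */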
	if (prop->mclk_freq % 6000000)
		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
	else
		syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;

	if (!*shim_mask) {
		dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);

		/* we first need to program the SyncPRD/CPU registers */
		dev_dbg(sdw->cdns.dev,
			"%s: first link up, programming SYNCPRD\n", __func__);

		/* set SyncPRD period */
		sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
		u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);

		/* Set SyncCPU bit */
		sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
		intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

		/* Link power up sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-up enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control |= spa_mask;

		ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
			goto out;
		}

		/* SyncCPU will change once link is active */
		ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
				     SDW_SHIM_SYNC_SYNCCPU, 0);
		if (ret < 0) {
			dev_err(sdw->cdns.dev,
				"Failed to set SHIM_SYNC: %d\n", ret);
			goto out;
		}
	}

	*shim_mask |= BIT(link_id);

	sdw->cdns.link_up = true;
out:
	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/* this needs to be called with shim_lock */
static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 ioctl;

	/* Switch to MIP from Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));

	ioctl &= ~(SDW_SHIM_IOCTL_DOE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_DO);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= (SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_BKE);
	ioctl &= ~(SDW_SHIM_IOCTL_COE);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Master IP has full control of the I/Os */
}

/* this needs to be called with shim_lock */
static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
{
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u16 ioctl;

	/* Glue logic */
	ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
	ioctl |= SDW_SHIM_IOCTL_BKE;
	ioctl |= SDW_SHIM_IOCTL_COE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl &= ~(SDW_SHIM_IOCTL_MIF);
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	/* at this point Integration Glue has full control of the I/Os */
}

static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int ret = 0;
	u16 ioctl = 0, act = 0;

	mutex_lock(sdw->link_res->shim_lock);

	/* Initialize Shim */
	ioctl |= SDW_SHIM_IOCTL_BKE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_WPDD;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DO;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	ioctl |= SDW_SHIM_IOCTL_DOE;
	intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
	usleep_range(10, 15);

	intel_shim_glue_to_master_ip(sdw);

	u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
	act |= SDW_SHIM_CTMCTL_DACTQE;
	act |= SDW_SHIM_CTMCTL_DODS;
	intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
	usleep_range(10, 15);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	u16 wake_en, wake_sts;

	mutex_lock(sdw->link_res->shim_lock);
	wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);

	if (wake_enable) {
		/* Enable the wakeup */
		wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
	} else {
		/* Disable the wake up interrupt */
		wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
		intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);

		/* Clear wake status */
		wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
		wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
		intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
	}
	mutex_unlock(sdw->link_res->shim_lock);
}

static int intel_link_power_down(struct sdw_intel *sdw)
{
	u32 link_control, spa_mask, cpa_mask;
	unsigned int link_id = sdw->instance;
	void __iomem *shim = sdw->link_res->shim;
	u32 *shim_mask = sdw->link_res->shim_mask;
	int ret = 0;

	mutex_lock(sdw->link_res->shim_lock);

	if (!(*shim_mask & BIT(link_id)))
		dev_err(sdw->cdns.dev,
			"%s: Unbalanced power-up/down calls\n", __func__);

	sdw->cdns.link_up = false;

	intel_shim_master_ip_to_glue(sdw);

	*shim_mask &= ~BIT(link_id);

	if (!*shim_mask) {

		dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);

		/* Link power down sequence */
		link_control = intel_readl(shim, SDW_SHIM_LCTL);

		/* only power-down enabled links */
		spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
		cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);

		link_control &= spa_mask;

		ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
		if (ret < 0) {
			dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);

			/*
			 * we leave the sdw->cdns.link_up flag as false since we've disabled
			 * the link at this point and cannot handle interrupts any longer.
			 */
		}
	}

	link_control = intel_readl(shim, SDW_SHIM_LCTL);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

static void intel_shim_sync_arm(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;

	mutex_lock(sdw->link_res->shim_lock);

	/* update SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
	sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
	intel_writel(shim, SDW_SHIM_SYNC, sync_reg);

	mutex_unlock(sdw->link_res->shim_lock);
}

static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
{
	void __iomem *shim = sdw->link_res->shim;
	u32 sync_reg;
	int ret;

	/* Read SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);

	/*
	 * Set SyncGO bit to synchronously trigger a bank switch for
	 * all the Masters. A write to the SYNCGO bit clears the CMDSYNC
	 * bit for all the Masters.
	 */
	sync_reg |= SDW_SHIM_SYNC_SYNCGO;

	ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
			      SDW_SHIM_SYNC_SYNCGO);

	if (ret < 0)
		dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);

	return ret;
}

static int intel_shim_sync_go(struct sdw_intel *sdw)
{
	int ret;

	mutex_lock(sdw->link_res->shim_lock);

	ret = intel_shim_sync_go_unlocked(sdw);

	mutex_unlock(sdw->link_res->shim_lock);

	return ret;
}

/*
 * PDI routines
 */
static void intel_pdi_init(struct sdw_intel *sdw,
			   struct sdw_cdns_stream_config *config)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pcm_cap, pdm_cap;

	/* PCM Stream Capability */
	pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));

	config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
	config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
	config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);

	dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
		config->pcm_bd, config->pcm_in, config->pcm_out);

	/* PDM Stream Capability */
	pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));

	config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
	config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
	config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);

	dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
		config->pdm_bd, config->pdm_in, config->pdm_out);
}

static int
intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int count;

	if (pcm) {
		count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));

		/*
		 * WORKAROUND: on all existing Intel controllers, PDI
		 * number 2 reports a channel count of 1 even though it
		 * supports 8 channels. Hardcode the value for PDI
		 * number 2.
		 */
		if (pdi_num == 2)
			count = 7;

	} else {
		count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
		count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
	}

	/* zero based values for channel count in register */
	count++;

	return count;
}

static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
				   struct sdw_cdns_pdi *pdi,
				   unsigned int num_pdi,
				   unsigned int *num_ch, bool pcm)
{
	int i, ch_count = 0;

	for (i = 0; i < num_pdi; i++) {
		pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
		ch_count += pdi->ch_count;
		pdi++;
	}

	*num_ch = ch_count;
	return 0;
}

static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
				      struct sdw_cdns_streams *stream, bool pcm)
{
	intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
				&stream->num_ch_bd, pcm);

	intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
				&stream->num_ch_in, pcm);

	intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
				&stream->num_ch_out, pcm);

	return 0;
}

static int intel_pdi_ch_update(struct sdw_intel *sdw)
{
	/* First update PCM streams followed by PDM streams */
	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
	intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);

	return 0;
}

static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *shim = sdw->link_res->shim;
	unsigned int link_id = sdw->instance;
	int pdi_conf = 0;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;
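	/*
	 * e.g. on link 0, PDI0/PDI1 map to ALH streams 3/4 while PDI2 and
	 * above start at stream 7, the gap being left for the Bulk streams.
	 */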

	/*
	 * Program stream parameters to stream SHIM register
	 * This is applicable for PCM stream only.
	 */
	if (pdi->type != SDW_STREAM_PCM)
		return;

	if (pdi->dir == SDW_DATA_DIR_RX)
		pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
	else
		pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);

	u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
	u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
	u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);

	intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
}

static void
intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
	void __iomem *alh = sdw->link_res->alh;
	unsigned int link_id = sdw->instance;
	unsigned int conf;

	/* the Bulk and PCM streams are not contiguous */
	pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
	if (pdi->num >= 2)
		pdi->intel_alh_id += 2;

	/* Program Stream config ALH register */
	conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));

	u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
	u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);

	intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}

static int intel_params_stream(struct sdw_intel *sdw,
			       struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai,
			       struct snd_pcm_hw_params *hw_params,
			       int link_id, int alh_stream_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_params_data params_data;

	params_data.substream = substream;
	params_data.dai = dai;
	params_data.hw_params = hw_params;
	params_data.link_id = link_id;
	params_data.alh_stream_id = alh_stream_id;

	if (res->ops && res->ops->params_stream && res->dev)
		return res->ops->params_stream(res->dev,
					       &params_data);
	return -EIO;
}

static int intel_free_stream(struct sdw_intel *sdw,
			     struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai,
			     int link_id)
{
	struct sdw_intel_link_res *res = sdw->link_res;
	struct sdw_intel_stream_free_data free_data;

	free_data.substream = substream;
	free_data.dai = dai;
	free_data.link_id = link_id;

	if (res->ops && res->ops->free_stream && res->dev)
		return res->ops->free_stream(res->dev,
					     &free_data);

	return 0;
}
/*
 * bank switch routines
 */

static int intel_pre_bank_switch(struct sdw_bus *bus)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	struct sdw_intel *sdw = cdns_to_intel(cdns);

	/* Write to register only for multi-link */
	if (!bus->multi_link)
		return 0;

	intel_shim_sync_arm(sdw);

	return 0;
}

static int intel_post_bank_switch(struct sdw_bus *bus)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	void __iomem *shim = sdw->link_res->shim;
	int sync_reg, ret;

	/* Write to register only for multi-link */
	if (!bus->multi_link)
		return 0;

	mutex_lock(sdw->link_res->shim_lock);

	/* Read SYNC register */
	sync_reg = intel_readl(shim, SDW_SHIM_SYNC);

	/*
	 * The post_bank_switch() op is called by the bus in a loop for
	 * all the Masters in the stream, with the expectation that we
	 * trigger the bank switch only for the first Master in the list
	 * and do nothing for the other Masters.
	 *
	 * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
	 */
	if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
		ret = 0;
		goto unlock;
	}

	ret = intel_shim_sync_go_unlocked(sdw);
unlock:
	mutex_unlock(sdw->link_res->shim_lock);

	if (ret < 0)
		dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);

	return ret;
}
/*
 * DAI routines
 */

static int intel_startup(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	int ret;

	ret = pm_runtime_get_sync(cdns->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err_ratelimited(cdns->dev,
				    "pm_runtime_get_sync failed in %s, ret %d\n",
				    __func__, ret);
		pm_runtime_put_noidle(cdns->dev);
		return ret;
	}
	return 0;
}

static int intel_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params,
			   struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dma_data *dma;
	struct sdw_cdns_pdi *pdi;
	struct sdw_stream_config sconfig;
	struct sdw_port_config *pconfig;
	int ch, dir;
	int ret;
	bool pcm = true;

	dma = snd_soc_dai_get_dma_data(dai, substream);
	if (!dma)
		return -EIO;

	ch = params_channels(params);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		dir = SDW_DATA_DIR_RX;
	else
		dir = SDW_DATA_DIR_TX;

	if (dma->stream_type == SDW_STREAM_PDM)
		pcm = false;

	if (pcm)
		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
	else
		pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);

	if (!pdi) {
		ret = -EINVAL;
		goto error;
	}

	/* do run-time configurations for SHIM, ALH and PDI/PORT */
	intel_pdi_shim_configure(sdw, pdi);
	intel_pdi_alh_configure(sdw, pdi);
	sdw_cdns_config_stream(cdns, ch, dir, pdi);

	/* store pdi and hw_params, may be needed in prepare step */
	dma->suspended = false;
	dma->pdi = pdi;
	dma->hw_params = params;

	/* Inform DSP about PDI stream number */
	ret = intel_params_stream(sdw, substream, dai, params,
				  sdw->instance,
				  pdi->intel_alh_id);
	if (ret)
		goto error;

	sconfig.direction = dir;
	sconfig.ch_count = ch;
	sconfig.frame_rate = params_rate(params);
	sconfig.type = dma->stream_type;

	if (dma->stream_type == SDW_STREAM_PDM) {
		sconfig.frame_rate *= 50;
		sconfig.bps = 1;
	} else {
		sconfig.bps = snd_pcm_format_width(params_format(params));
	}

	/* Port configuration */
	pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto error;
	}

	pconfig->num = pdi->num;
	pconfig->ch_mask = (1 << ch) - 1;

	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
				    pconfig, 1, dma->stream);
	if (ret)
		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);

	kfree(pconfig);
error:
	return ret;
}

static int intel_prepare(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dma_data *dma;
	int ch, dir;
	int ret = 0;

	dma = snd_soc_dai_get_dma_data(dai, substream);
	if (!dma) {
		dev_err(dai->dev, "failed to get dma data in %s\n",
			__func__);
		return -EIO;
	}

	if (dma->suspended) {
		dma->suspended = false;

		/*
		 * .prepare() is called after system resume, where we
		 * need to reinitialize the SHIM/ALH/Cadence IP.
		 * .prepare() is also called to deal with underflows,
		 * but in those cases we cannot touch ALH/SHIM
		 * registers
		 */

		/* configure stream */
		ch = params_channels(dma->hw_params);
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			dir = SDW_DATA_DIR_RX;
		else
			dir = SDW_DATA_DIR_TX;

		intel_pdi_shim_configure(sdw, dma->pdi);
		intel_pdi_alh_configure(sdw, dma->pdi);
		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);

		/* Inform DSP about PDI stream number */
		ret = intel_params_stream(sdw, substream, dai,
					  dma->hw_params,
					  sdw->instance,
					  dma->pdi->intel_alh_id);
	}

	return ret;
}

static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_cdns_dma_data *dma;
	int ret;

	dma = snd_soc_dai_get_dma_data(dai, substream);
	if (!dma)
		return -EIO;

	/*
	 * The sdw stream state will transition to RELEASED when stream->
	 * master_list is empty. So the stream state will transition to
	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
	 * cpu-dai.
	 */
	ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
	if (ret < 0) {
		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
			dma->stream->name, ret);
		return ret;
	}

	ret = intel_free_stream(sdw, substream, dai, sdw->instance);
	if (ret < 0) {
		dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
		return ret;
	}

	dma->hw_params = NULL;
	dma->pdi = NULL;

	return 0;
}

static void intel_shutdown(struct snd_pcm_substream *substream,
			   struct snd_soc_dai *dai)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);

	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);
}

static int intel_component_dais_suspend(struct snd_soc_component *component)
{
	struct sdw_cdns_dma_data *dma;
	struct snd_soc_dai *dai;

	for_each_component_dais(component, dai) {
		/*
		 * we don't have a .suspend dai_ops, and we don't have access
		 * to the substream, so let's mark both capture and playback
		 * DMA contexts as suspended
		 */
		dma = dai->playback_dma_data;
		if (dma)
			dma->suspended = true;

		dma = dai->capture_dma_data;
		if (dma)
			dma->suspended = true;
	}

	return 0;
}

static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
				    void *stream, int direction)
{
	return cdns_set_sdw_stream(dai, stream, true, direction);
}

static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
				    void *stream, int direction)
{
	return cdns_set_sdw_stream(dai, stream, false, direction);
}

static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
				  int direction)
{
	struct sdw_cdns_dma_data *dma;

	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
		dma = dai->playback_dma_data;
	else
		dma = dai->capture_dma_data;

	if (!dma)
		return ERR_PTR(-EINVAL);

	return dma->stream;
}

static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
	.startup = intel_startup,
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.shutdown = intel_shutdown,
	.set_sdw_stream = intel_pcm_set_sdw_stream,
	.get_sdw_stream = intel_get_sdw_stream,
};

static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
	.startup = intel_startup,
	.hw_params = intel_hw_params,
	.prepare = intel_prepare,
	.hw_free = intel_hw_free,
	.shutdown = intel_shutdown,
	.set_sdw_stream = intel_pdm_set_sdw_stream,
	.get_sdw_stream = intel_get_sdw_stream,
};

static const struct snd_soc_component_driver dai_component = {
	.name = "soundwire",
	.suspend = intel_component_dais_suspend
};
1161*4882a593Smuzhiyun
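/*
 * Populate 'num' DAI driver entries starting at offset 'off'. Each DAI is
 * named "SDW<instance> Pin<index>", gets playback and/or capture
 * capabilities depending on the PDI type (IN, OUT or bi-directional) and
 * is wired to the PCM or PDM DAI ops.
 */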
static int intel_create_dai(struct sdw_cdns *cdns,
			    struct snd_soc_dai_driver *dais,
			    enum intel_pdi_type type,
			    u32 num, u32 off, u32 max_ch, bool pcm)
{
	int i;

	if (num == 0)
		return 0;

	/* TODO: Read supported rates/formats from hardware */
	for (i = off; i < (off + num); i++) {
		dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
					      "SDW%d Pin%d",
					      cdns->instance, i);
		if (!dais[i].name)
			return -ENOMEM;

		if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
			dais[i].playback.channels_min = 1;
			dais[i].playback.channels_max = max_ch;
			dais[i].playback.rates = SNDRV_PCM_RATE_48000;
			dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
		}

		if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
			dais[i].capture.channels_min = 1;
			dais[i].capture.channels_max = max_ch;
			dais[i].capture.rates = SNDRV_PCM_RATE_48000;
			dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
		}

		if (pcm)
			dais[i].ops = &intel_pcm_dai_ops;
		else
			dais[i].ops = &intel_pdm_dai_ops;
	}

	return 0;
}

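/*
 * Allocate one DAI per PDI and register them with ASoC. DAIs are laid
 * out as PCM IN, PCM OUT, PCM BD, then PDM IN, PDM OUT, PDM BD, with
 * 'off' tracking the running offset into the array.
 */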
static int intel_register_dai(struct sdw_intel *sdw)
{
	struct sdw_cdns *cdns = &sdw->cdns;
	struct sdw_cdns_streams *stream;
	struct snd_soc_dai_driver *dais;
	int num_dai, ret, off = 0;

	/* DAIs are created based on total number of PDIs supported */
	num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;

	dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
	if (!dais)
		return -ENOMEM;

	/* Create PCM DAIs */
	stream = &cdns->pcm;

	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
			       off, stream->num_ch_in, true);
	if (ret)
		return ret;

	off += cdns->pcm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
			       off, stream->num_ch_out, true);
	if (ret)
		return ret;

	off += cdns->pcm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
			       off, stream->num_ch_bd, true);
	if (ret)
		return ret;

	/* Create PDM DAIs */
	stream = &cdns->pdm;
	off += cdns->pcm.num_bd;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
			       off, stream->num_ch_in, false);
	if (ret)
		return ret;

	off += cdns->pdm.num_in;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
			       off, stream->num_ch_out, false);
	if (ret)
		return ret;

	off += cdns->pdm.num_out;
	ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
			       off, stream->num_ch_bd, false);
	if (ret)
		return ret;

	return snd_soc_register_component(cdns->dev, &dai_component,
					  dais, num_dai);
}

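/*
 * Read the Intel-specific subproperties of the link node: the IP clock
 * (halved to obtain the bus clock) and a quirk mask which may mark the
 * link as disabled by the BIOS.
 */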
static int sdw_master_read_intel_prop(struct sdw_bus *bus)
{
	struct sdw_master_prop *prop = &bus->prop;
	struct fwnode_handle *link;
	char name[32];
	u32 quirk_mask;

	/* Find master handle */
	snprintf(name, sizeof(name),
		 "mipi-sdw-link-%d-subproperties", bus->link_id);

	link = device_get_named_child_node(bus->dev, name);
	if (!link) {
		dev_err(bus->dev, "Master node %s not found\n", name);
		return -EIO;
	}

	fwnode_property_read_u32(link,
				 "intel-sdw-ip-clock",
				 &prop->mclk_freq);

	/* the values reported by BIOS are the 2x clock, not the bus clock */
	prop->mclk_freq /= 2;

	fwnode_property_read_u32(link,
				 "intel-quirk-mask",
				 &quirk_mask);

	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
		prop->hw_disabled = true;

	return 0;
}

static int intel_prop_read(struct sdw_bus *bus)
{
	/* Initialize with default handler to read all DisCo properties */
	sdw_master_read_prop(bus);

	/* read Intel-specific properties */
	sdw_master_read_intel_prop(bus);

	return 0;
}

static struct sdw_master_ops sdw_intel_ops = {
	.read_prop = sdw_master_read_prop,
	.xfer_msg = cdns_xfer_msg,
	.xfer_msg_defer = cdns_xfer_msg_defer,
	.reset_page_addr = cdns_reset_page_addr,
	.set_bus_conf = cdns_bus_conf,
	.pre_bank_switch = intel_pre_bank_switch,
	.post_bank_switch = intel_post_bank_switch,
};

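/*
 * Power up the link and program the SHIM, taking into account whether
 * the Cadence IP is currently in clock-stop mode.
 */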
static int intel_init(struct sdw_intel *sdw)
{
	bool clock_stop;

	/* Initialize shim and controller */
	intel_link_power_up(sdw);

	clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);

	intel_shim_init(sdw, clock_stop);

	return 0;
}

/*
 * probe and init
 */
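/*
 * Note: intel_master_probe() only sets up software state and adds the bus;
 * the hardware is brought up in intel_master_startup(), which is left
 * non-static and is expected to be called by the parent Intel SoundWire
 * init code once all links have been probed.
 */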
static int intel_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sdw_intel *sdw;
	struct sdw_cdns *cdns;
	struct sdw_bus *bus;
	int ret;

	sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
	if (!sdw)
		return -ENOMEM;

	cdns = &sdw->cdns;
	bus = &cdns->bus;

	sdw->instance = pdev->id;
	sdw->link_res = dev_get_platdata(dev);
	cdns->dev = dev;
	cdns->registers = sdw->link_res->registers;
	cdns->instance = sdw->instance;
	cdns->msg_count = 0;

	bus->link_id = pdev->id;

	sdw_cdns_probe(cdns);

	/* Set property read ops */
	sdw_intel_ops.read_prop = intel_prop_read;
	bus->ops = &sdw_intel_ops;

	/* set driver data, accessed by snd_soc_dai_get_drvdata() */
	dev_set_drvdata(dev, cdns);

	/* use generic bandwidth allocation algorithm */
	sdw->cdns.bus.compute_params = sdw_compute_params;

	ret = sdw_bus_master_add(bus, dev, dev->fwnode);
	if (ret) {
		dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
		return ret;
	}

	if (bus->prop.hw_disabled)
		dev_info(dev,
			 "SoundWire master %d is disabled, will be ignored\n",
			 bus->link_id);
	/*
	 * Ignore the BIOS-provided err_threshold: it is not suitable when
	 * dealing with multiple hardware-synchronized links
	 */
	bus->prop.err_threshold = 0;

	return 0;
}

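/*
 * Complete the hardware initialization of one link: program the SHIM and
 * Cadence IP, configure the PDIs, enable interrupts, exit the bus reset
 * (with hardware synchronization across links when multi-link is used),
 * register the DAIs and finally enable runtime PM.
 *
 * The sdw_md_flags module parameter is sliced per link: link N uses the
 * flags in byte N, i.e. md_flags >> (bus->link_id * 8).
 */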
int intel_master_startup(struct platform_device *pdev)
{
	struct sdw_cdns_stream_config config;
	struct device *dev = &pdev->dev;
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	int link_flags;
	bool multi_link;
	u32 clock_stop_quirks;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_info(dev,
			 "SoundWire master %d is disabled, ignoring\n",
			 sdw->instance);
		return 0;
	}

	link_flags = md_flags >> (bus->link_id * 8);
	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
	if (!multi_link) {
		dev_dbg(dev, "Multi-link is disabled\n");
		bus->multi_link = false;
	} else {
		/*
		 * hardware-based synchronization is required regardless
		 * of the number of segments used by a stream: SSP-based
		 * synchronization is gated by gsync when the multi-master
		 * mode is set.
		 */
		bus->multi_link = true;
		bus->hw_sync_min_links = 1;
	}

	/* Initialize shim, controller */
	ret = intel_init(sdw);
	if (ret)
		goto err_init;

	/* Read the PDI config and initialize cadence PDI */
	intel_pdi_init(sdw, &config);
	ret = sdw_cdns_pdi_init(cdns, config);
	if (ret)
		goto err_init;

	intel_pdi_ch_update(sdw);

	ret = sdw_cdns_enable_interrupt(cdns, true);
	if (ret < 0) {
		dev_err(dev, "cannot enable interrupts\n");
		goto err_init;
	}

	/*
	 * follow recommended programming flows to avoid timeouts when
	 * gsync is enabled
	 */
	if (multi_link)
		intel_shim_sync_arm(sdw);

	ret = sdw_cdns_init(cdns);
	if (ret < 0) {
		dev_err(dev, "unable to initialize Cadence IP\n");
		goto err_interrupt;
	}

	ret = sdw_cdns_exit_reset(cdns);
	if (ret < 0) {
		dev_err(dev, "unable to exit bus reset sequence\n");
		goto err_interrupt;
	}

	if (multi_link) {
		ret = intel_shim_sync_go(sdw);
		if (ret < 0) {
			dev_err(dev, "sync go failed: %d\n", ret);
			goto err_interrupt;
		}
	}

	/* Register DAIs */
	ret = intel_register_dai(sdw);
	if (ret) {
		dev_err(dev, "DAI registration failed: %d\n", ret);
		goto err_interrupt;
	}

	intel_debugfs_init(sdw);

	/* Enable runtime PM */
	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
		pm_runtime_set_autosuspend_delay(dev,
						 INTEL_MASTER_SUSPEND_DELAY_MS);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_mark_last_busy(dev);

		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;
	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
		/*
		 * To keep the clock running we need to prevent
		 * pm_runtime suspend from happening by increasing the
		 * reference count.
		 * This quirk is specified by the parent PCI device in
		 * case of specific latency requirements. It will have
		 * no effect if pm_runtime is disabled by the user via
		 * a module parameter for testing purposes.
		 */
		pm_runtime_get_noresume(dev);
	}

	/*
	 * The runtime PM status of Slave devices is "Unsupported"
	 * until they report as ATTACHED. If they don't, e.g. because
	 * there are no Slave devices populated or if the power-on is
	 * delayed or dependent on a power switch, the Master will
	 * remain active and prevent its parent from suspending.
	 *
	 * Conditionally force the pm_runtime core to re-evaluate the
	 * Master status in the absence of any Slave activity. A quirk
	 * is provided to e.g. deal with Slaves that may be powered on
	 * with a delay. A more complete solution would require the
	 * definition of Master properties.
	 */
	if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
		pm_runtime_idle(dev);

	return 0;

err_interrupt:
	sdw_cdns_enable_interrupt(cdns, false);
err_init:
	return ret;
}

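/*
 * Tear down in reverse order of the startup sequence: debugfs, interrupts
 * and the ASoC component are only cleaned up if the link was enabled,
 * then the bus Master is deleted.
 */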
static int intel_master_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;

	/*
	 * Since pm_runtime is already disabled, we don't decrease
	 * the refcount when the clock_stop_quirk is
	 * SDW_INTEL_CLK_STOP_NOT_ALLOWED
	 */
	if (!bus->prop.hw_disabled) {
		intel_debugfs_exit(sdw);
		sdw_cdns_enable_interrupt(cdns, false);
		snd_soc_unregister_component(dev);
	}
	sdw_bus_master_delete(bus);

	return 0;
}

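/*
 * Handle a wake event reported by the SHIM: if the wake status bit for
 * this instance is set, disable further wakes and request a runtime
 * resume of the Master. This helper is not static and is expected to be
 * called by the parent driver when a WAKEEN interrupt is signaled.
 */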
int intel_master_process_wakeen_event(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sdw_intel *sdw;
	struct sdw_bus *bus;
	void __iomem *shim;
	u16 wake_sts;

	sdw = platform_get_drvdata(pdev);
	bus = &sdw->cdns.bus;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
		return 0;
	}

	shim = sdw->link_res->shim;
	wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);

	if (!(wake_sts & BIT(sdw->instance)))
		return 0;

	/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
	intel_shim_wake(sdw, false);

	/*
	 * resume the Master, which will generate a bus reset and result in
	 * Slaves re-attaching and being re-enumerated. The SoundWire physical
	 * device which generated the wake will trigger an interrupt, which
	 * will in turn cause the corresponding Linux Slave device to be
	 * resumed and the Slave codec driver to check the status.
	 */
	pm_request_resume(dev);

	return 0;
}

/*
 * PM calls
 */

#ifdef CONFIG_PM

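/*
 * System suspend: if the link is already runtime-suspended only the wake
 * configuration may need to be adjusted, otherwise disable interrupts,
 * power the link down and disable wakes.
 */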
static int __maybe_unused intel_suspend(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	if (pm_runtime_suspended(dev)) {
		dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);

		clock_stop_quirks = sdw->link_res->clock_stop_quirks;

		if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
		     !clock_stop_quirks) &&
		    !pm_runtime_suspended(dev->parent)) {

			/*
			 * if we've enabled clock stop, and the parent
			 * is still active, disable shim wake. The
			 * SHIM registers are not accessible if the
			 * parent is already pm_runtime suspended so
			 * it's too late to change that configuration
			 */

			intel_shim_wake(sdw, false);
		}

		return 0;
	}

	ret = sdw_cdns_enable_interrupt(cdns, false);
	if (ret < 0) {
		dev_err(dev, "cannot disable interrupts on suspend\n");
		return ret;
	}

	ret = intel_link_power_down(sdw);
	if (ret) {
		dev_err(dev, "Link power down failed: %d", ret);
		return ret;
	}

	intel_shim_wake(sdw, false);

	return 0;
}

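/*
 * Runtime suspend follows one of two supported flows depending on the
 * clock-stop quirks: a full teardown (interrupts disabled, link powered
 * down, wakes disabled), or clock-stop mode where the link is powered
 * down but wakes are left enabled so that a Slave-initiated wake can be
 * handled (see intel_master_process_wakeen_event()). Any other quirk
 * combination is rejected.
 */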
static int intel_suspend_runtime(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;

	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {

		ret = sdw_cdns_enable_interrupt(cdns, false);
		if (ret < 0) {
			dev_err(dev, "cannot disable interrupts on suspend\n");
			return ret;
		}

		ret = intel_link_power_down(sdw);
		if (ret) {
			dev_err(dev, "Link power down failed: %d", ret);
			return ret;
		}

		intel_shim_wake(sdw, false);

	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
		   !clock_stop_quirks) {
		ret = sdw_cdns_clock_stop(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable clock stop on suspend\n");
			return ret;
		}

		ret = sdw_cdns_enable_interrupt(cdns, false);
		if (ret < 0) {
			dev_err(dev, "cannot disable interrupts on suspend\n");
			return ret;
		}

		ret = intel_link_power_down(sdw);
		if (ret) {
			dev_err(dev, "Link power down failed: %d", ret);
			return ret;
		}

		intel_shim_wake(sdw, true);
	} else {
		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
			__func__, clock_stop_quirks);
		ret = -EINVAL;
	}

	return ret;
}

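/*
 * System resume: re-initialize the SHIM and Cadence IP, force all Slaves
 * to be re-enumerated and exit the bus reset, keeping the pm_runtime
 * state consistent if the link was runtime-suspended before the system
 * transition.
 */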
static int __maybe_unused intel_resume(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	int link_flags;
	bool multi_link;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	link_flags = md_flags >> (bus->link_id * 8);
	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);

	if (pm_runtime_suspended(dev)) {
		dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);

		/* follow required sequence from runtime_pm.rst */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_enable(dev);

		link_flags = md_flags >> (bus->link_id * 8);

		if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
			pm_runtime_idle(dev);
	}

	ret = intel_init(sdw);
	if (ret) {
		dev_err(dev, "%s failed: %d", __func__, ret);
		return ret;
	}

	/*
	 * make sure all Slaves are tagged as UNATTACHED and provide
	 * reason for reinitialization
	 */
	sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);

	ret = sdw_cdns_enable_interrupt(cdns, true);
	if (ret < 0) {
		dev_err(dev, "cannot enable interrupts during resume\n");
		return ret;
	}

	/*
	 * follow recommended programming flows to avoid timeouts when
	 * gsync is enabled
	 */
	if (multi_link)
		intel_shim_sync_arm(sdw);

	ret = sdw_cdns_init(&sdw->cdns);
	if (ret < 0) {
		dev_err(dev, "unable to initialize Cadence IP during resume\n");
		return ret;
	}

	ret = sdw_cdns_exit_reset(cdns);
	if (ret < 0) {
		dev_err(dev, "unable to exit bus reset sequence during resume\n");
		return ret;
	}

	if (multi_link) {
		ret = intel_shim_sync_go(sdw);
		if (ret < 0) {
			dev_err(dev, "sync go failed during resume\n");
			return ret;
		}
	}

	/*
	 * after system resume, the pm_runtime suspend() may kick in
	 * during the enumeration, before any child device forces the
	 * Master device to remain active. Using pm_runtime_get()
	 * routines is not really possible, since it'd prevent the
	 * Master from suspending.
	 * A reasonable compromise is to update the pm_runtime
	 * counters and delay the pm_runtime suspend by several
	 * seconds, by which time all enumeration should be complete.
	 */
	pm_runtime_mark_last_busy(dev);

	return ret;
}

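/*
 * Runtime resume mirrors intel_suspend_runtime(): a full re-initialization
 * and bus reset for the teardown case, a clock restart (with or without a
 * bus reset, depending on whether clock-stop mode 0 was preserved) for the
 * bus-reset case, and a simple clock restart when no quirks are set.
 */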
static int intel_resume_runtime(struct device *dev)
{
	struct sdw_cdns *cdns = dev_get_drvdata(dev);
	struct sdw_intel *sdw = cdns_to_intel(cdns);
	struct sdw_bus *bus = &cdns->bus;
	u32 clock_stop_quirks;
	bool clock_stop0;
	int link_flags;
	bool multi_link;
	int status;
	int ret;

	if (bus->prop.hw_disabled) {
		dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
			bus->link_id);
		return 0;
	}

	link_flags = md_flags >> (bus->link_id * 8);
	multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);

	clock_stop_quirks = sdw->link_res->clock_stop_quirks;

	if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		/*
		 * make sure all Slaves are tagged as UNATTACHED and provide
		 * reason for reinitialization
		 */
		sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);

		ret = sdw_cdns_enable_interrupt(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable interrupts during resume\n");
			return ret;
		}

		/*
		 * follow recommended programming flows to avoid
		 * timeouts when gsync is enabled
		 */
		if (multi_link)
			intel_shim_sync_arm(sdw);

		ret = sdw_cdns_init(&sdw->cdns);
		if (ret < 0) {
			dev_err(dev, "unable to initialize Cadence IP during resume\n");
			return ret;
		}

		ret = sdw_cdns_exit_reset(cdns);
		if (ret < 0) {
			dev_err(dev, "unable to exit bus reset sequence during resume\n");
			return ret;
		}

		if (multi_link) {
			ret = intel_shim_sync_go(sdw);
			if (ret < 0) {
				dev_err(dev, "sync go failed during resume\n");
				return ret;
			}
		}
	} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		/*
		 * An exception condition occurs for the CLK_STOP_BUS_RESET
		 * case if one or more Masters remain active. In this condition,
		 * all the Masters are powered on since they are in the same
		 * power domain. The Master can preserve its context for clock
		 * stop0, so there is no need to clear the Slave status and
		 * reset the bus.
		 */
		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);

		if (!clock_stop0) {

			/*
			 * make sure all Slaves are tagged as UNATTACHED and
			 * provide reason for reinitialization
			 */

			status = SDW_UNATTACH_REQUEST_MASTER_RESET;
			sdw_clear_slave_status(bus, status);

			ret = sdw_cdns_enable_interrupt(cdns, true);
			if (ret < 0) {
				dev_err(dev, "cannot enable interrupts during resume\n");
				return ret;
			}

			/*
			 * follow recommended programming flows to avoid
			 * timeouts when gsync is enabled
			 */
			if (multi_link)
				intel_shim_sync_arm(sdw);

			/*
			 * Re-initialize the IP since it was powered-off
			 */
			sdw_cdns_init(&sdw->cdns);

		} else {
			ret = sdw_cdns_enable_interrupt(cdns, true);
			if (ret < 0) {
				dev_err(dev, "cannot enable interrupts during resume\n");
				return ret;
			}
		}

		ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
		if (ret < 0) {
			dev_err(dev, "unable to restart clock during resume\n");
			return ret;
		}

		if (!clock_stop0) {
			ret = sdw_cdns_exit_reset(cdns);
			if (ret < 0) {
				dev_err(dev, "unable to exit bus reset sequence during resume\n");
				return ret;
			}

			if (multi_link) {
				ret = intel_shim_sync_go(sdw);
				if (ret < 0) {
					dev_err(sdw->cdns.dev, "sync go failed during resume\n");
					return ret;
				}
			}
		}
	} else if (!clock_stop_quirks) {

		clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
		if (!clock_stop0)
			dev_err(dev, "%s invalid configuration, clock was not stopped", __func__);

		ret = intel_init(sdw);
		if (ret) {
			dev_err(dev, "%s failed: %d", __func__, ret);
			return ret;
		}

		ret = sdw_cdns_enable_interrupt(cdns, true);
		if (ret < 0) {
			dev_err(dev, "cannot enable interrupts during resume\n");
			return ret;
		}

		ret = sdw_cdns_clock_restart(cdns, false);
		if (ret < 0) {
			dev_err(dev, "unable to resume master during resume\n");
			return ret;
		}
	} else {
		dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
			__func__, clock_stop_quirks);
		ret = -EINVAL;
	}

	return ret;
}

#endif

static const struct dev_pm_ops intel_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
};

static struct platform_driver sdw_intel_drv = {
	.probe = intel_master_probe,
	.remove = intel_master_remove,
	.driver = {
		.name = "intel-sdw",
		.pm = &intel_pm,
	}
};

module_platform_driver(sdw_intel_drv);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:intel-sdw");
MODULE_DESCRIPTION("Intel SoundWire Master Driver");