// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

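/*
 * No-op stubs used where an hsi_msg or hsi_client callback is required
 * but no action is needed.
 */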
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

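/* Sample the current state of the incoming CAWAKE line via its GPIO. */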
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

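/* debugfs "regs" file: dump the port wake/IRQ, SST and SSR register blocks. */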
static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

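/* debugfs read/write hooks for the SST clock divisor register. */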
static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

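/* Create the per-port debugfs hierarchy: <port>/regs and <port>/sst/divisor. */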
static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
			      struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);

	return 0;
}
#endif

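/* Deferred work: complete and dequeue every message on the port's error queue. */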
static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

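/*
 * Reserve a free GDD (DMA) logical channel for the message, recording the
 * message and its scatterlist in the controller's transfer table.
 */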
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

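/*
 * Program a GDD logical channel for the message: map the scatterlist,
 * set up source/destination, burst and sync parameters, enable the
 * channel interrupt and kick off the transfer. A clock reference is
 * held for the duration of the transfer.
 */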
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

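/*
 * Start a PIO transfer: arm the DATAACCEPT (write) or DATAAVAILABLE (read)
 * interrupt for the channel; the data itself is moved word by word from
 * the interrupt thread. Writes keep an extra clock reference until done.
 */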
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

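/*
 * Kick the first queued message on a channel: use DMA for multi-word
 * transfers when a GDD channel is free, otherwise fall back to PIO.
 */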
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

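/*
 * Handle a break-frame request: transmit a HW break (FRAME mode only) or
 * arm break detection and queue the message until the break arrives.
 */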
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

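/*
 * hsi_port async callback: queue the message on the per-channel TX or RX
 * list and try to start the transfer immediately.
 */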
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

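/*
 * Compute the SST divisor for the requested max_speed: the TX bit clock
 * is half the functional clock, and the rate is pre-decremented so an
 * exact multiple of max_speed still rounds down.
 */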
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

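/*
 * Drop all messages on @queue belonging to @cl (or every message when
 * @cl is NULL), invoking their destructor or freeing them.
 */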
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

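/*
 * hsi_port setup callback: program framesize, divisor, channel count,
 * arbitration and mode for both SST (TX) and SSR (RX), and mirror the
 * values into shadow registers used to restore state after OFF mode.
 */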
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

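/*
 * hsi_port flush callback: silence the lines via pinctrl, abort all DMA
 * and PIO transfers for the port, reset buffer/error/break state, clear
 * pending interrupts and drop every queued message.
 */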
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

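/*
 * Worker that raises the outgoing wake line. Runs from process context
 * so pm_runtime_get_sync() may sleep while the port powers up.
 */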
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

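/*
 * Restart queued transfers on @queue, completing messages with an error
 * until one starts successfully (or the queue is exhausted).
 */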
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

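/*
 * Remove a client's messages from every TX/RX queue, releasing the clock
 * references held by in-flight writes and disarming/acking the interrupts
 * that belonged to those transfers.
 */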
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references for writes, GDD ones included */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

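/*
 * Abort all GDD (DMA) transfers owned by @cl: stop the channels, drop
 * clock references held by reads, and disable/ack their GDD interrupts.
 */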
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

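/* Put both SST and SSR into @mode; the readback acts as an OCP barrier. */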
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

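/*
 * Handle an SSI error interrupt: cancel all in-flight GDD and PIO reads,
 * ack the error, and complete the pending read requests with an error
 * status before restarting anything still queued.
 */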
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

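/*
 * Handle a detected HW break: disarm break detection, ack the event and
 * complete every message waiting on the break queue.
 */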
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

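/*
 * Advance a PIO transfer by one 32-bit word. While the transfer is still
 * in progress the interrupt is only acked; writes additionally stay armed
 * for one extra round so the last frame really leaves the wire before the
 * completion callback runs.
 */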
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

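/*
 * Threaded IRQ handler for the port: service DATAACCEPT/DATAAVAILABLE,
 * break and error events, re-reading the status until it drains.
 */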
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition on the line.
		 * In such a case, if interrupt latencies are long, we can
		 * miss the low event or see the high event twice. This
		 * workaround avoids breaking the clock reference count
		 * when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

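/* Look up and request the port's main (PIO) interrupt. */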
ssi_port_irq(struct hsi_port * port,struct platform_device * pd)1035*4882a593Smuzhiyun static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
1036*4882a593Smuzhiyun {
1037*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
1038*4882a593Smuzhiyun 	int err;
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	err = platform_get_irq(pd, 0);
1041*4882a593Smuzhiyun 	if (err < 0)
1042*4882a593Smuzhiyun 		return err;
1043*4882a593Smuzhiyun 	omap_port->irq = err;
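	/*
	 * No primary handler is installed, so IRQF_ONESHOT is required to
	 * keep the interrupt masked until ssi_pio_thread has finished.
	 */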
1044*4882a593Smuzhiyun 	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
1045*4882a593Smuzhiyun 				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
1046*4882a593Smuzhiyun 	if (err < 0)
1047*4882a593Smuzhiyun 		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
1048*4882a593Smuzhiyun 							omap_port->irq, err);
1049*4882a593Smuzhiyun 	return err;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
1053*4882a593Smuzhiyun {
1054*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
1055*4882a593Smuzhiyun 	int cawake_irq;
1056*4882a593Smuzhiyun 	int err;
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun 	if (!omap_port->wake_gpio) {
1059*4882a593Smuzhiyun 		omap_port->wake_irq = -1;
1060*4882a593Smuzhiyun 		return 0;
1061*4882a593Smuzhiyun 	}
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
1064*4882a593Smuzhiyun 	omap_port->wake_irq = cawake_irq;
1065*4882a593Smuzhiyun 
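	/*
	 * Trigger on both edges so that ssi_wake_thread sees the CAWAKE
	 * line going high as well as going low.
	 */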
1066*4882a593Smuzhiyun 	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
1067*4882a593Smuzhiyun 		ssi_wake_thread,
1068*4882a593Smuzhiyun 		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1069*4882a593Smuzhiyun 		"SSI cawake", port);
1070*4882a593Smuzhiyun 	if (err < 0)
1071*4882a593Smuzhiyun 		return dev_err_probe(&port->device, err,
1072*4882a593Smuzhiyun 				"Request Wake in IRQ %d failed\n", cawake_irq);
1073*4882a593Smuzhiyun 	err = enable_irq_wake(cawake_irq);
1074*4882a593Smuzhiyun 	if (err < 0)
1075*4882a593Smuzhiyun 		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
1076*4882a593Smuzhiyun 			cawake_irq, err);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	return err;
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun static void ssi_queues_init(struct omap_ssi_port *omap_port)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	unsigned int ch;
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
1086*4882a593Smuzhiyun 		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
1087*4882a593Smuzhiyun 		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
1088*4882a593Smuzhiyun 	}
1089*4882a593Smuzhiyun 	INIT_LIST_HEAD(&omap_port->brkqueue);
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun static int ssi_port_get_iomem(struct platform_device *pd,
1093*4882a593Smuzhiyun 		const char *name, void __iomem **pbase, dma_addr_t *phy)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun 	struct hsi_port *port = platform_get_drvdata(pd);
1096*4882a593Smuzhiyun 	struct resource *mem;
1097*4882a593Smuzhiyun 	struct resource *ioarea;
1098*4882a593Smuzhiyun 	void __iomem *base;
1099*4882a593Smuzhiyun 
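	/*
	 * The region and mapping are bound to the HSI port device via devm,
	 * so they are released automatically with the port.
	 */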
1100*4882a593Smuzhiyun 	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
1101*4882a593Smuzhiyun 	if (!mem) {
1102*4882a593Smuzhiyun 		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
1103*4882a593Smuzhiyun 		return -ENXIO;
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 	ioarea = devm_request_mem_region(&port->device, mem->start,
1106*4882a593Smuzhiyun 					resource_size(mem), dev_name(&pd->dev));
1107*4882a593Smuzhiyun 	if (!ioarea) {
1108*4882a593Smuzhiyun 		dev_err(&pd->dev, "%s IO memory region request failed\n",
1109*4882a593Smuzhiyun 								mem->name);
1110*4882a593Smuzhiyun 		return -ENXIO;
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun 	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
1113*4882a593Smuzhiyun 	if (!base) {
1114*4882a593Smuzhiyun 		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
1115*4882a593Smuzhiyun 		return -ENXIO;
1116*4882a593Smuzhiyun 	}
1117*4882a593Smuzhiyun 	*pbase = base;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	if (phy)
1120*4882a593Smuzhiyun 		*phy = mem->start;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	return 0;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun static int ssi_port_probe(struct platform_device *pd)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	struct device_node *np = pd->dev.of_node;
1128*4882a593Smuzhiyun 	struct hsi_port *port;
1129*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port;
1130*4882a593Smuzhiyun 	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
1131*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1132*4882a593Smuzhiyun 	struct gpio_desc *cawake_gpio = NULL;
1133*4882a593Smuzhiyun 	u32 port_id;
1134*4882a593Smuzhiyun 	int err;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	dev_dbg(&pd->dev, "init ssi port...\n");
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	if (!ssi->port || !omap_ssi->port) {
1139*4882a593Smuzhiyun 		dev_err(&pd->dev, "ssi controller not initialized!\n");
1140*4882a593Smuzhiyun 		err = -ENODEV;
1141*4882a593Smuzhiyun 		goto error;
1142*4882a593Smuzhiyun 	}
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	/* get id of first uninitialized port in controller */
1145*4882a593Smuzhiyun 	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
1146*4882a593Smuzhiyun 		port_id++)
1147*4882a593Smuzhiyun 		;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (port_id >= ssi->num_ports) {
1150*4882a593Smuzhiyun 		dev_err(&pd->dev, "port id out of range!\n");
1151*4882a593Smuzhiyun 		err = -ENODEV;
1152*4882a593Smuzhiyun 		goto error;
1153*4882a593Smuzhiyun 	}
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	port = ssi->port[port_id];
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	if (!np) {
1158*4882a593Smuzhiyun 		dev_err(&pd->dev, "missing device tree data\n");
1159*4882a593Smuzhiyun 		err = -EINVAL;
1160*4882a593Smuzhiyun 		goto error;
1161*4882a593Smuzhiyun 	}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
1164*4882a593Smuzhiyun 	if (IS_ERR(cawake_gpio)) {
1165*4882a593Smuzhiyun 		err = PTR_ERR(cawake_gpio);
1166*4882a593Smuzhiyun 		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
1167*4882a593Smuzhiyun 		goto error;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
1171*4882a593Smuzhiyun 	if (!omap_port) {
1172*4882a593Smuzhiyun 		err = -ENOMEM;
1173*4882a593Smuzhiyun 		goto error;
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 	omap_port->wake_gpio = cawake_gpio;
1176*4882a593Smuzhiyun 	omap_port->pdev = &pd->dev;
1177*4882a593Smuzhiyun 	omap_port->port_id = port_id;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
1180*4882a593Smuzhiyun 	INIT_WORK(&omap_port->work, start_tx_work);
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	/* initialize HSI port */
1183*4882a593Smuzhiyun 	port->async	= ssi_async;
1184*4882a593Smuzhiyun 	port->setup	= ssi_setup;
1185*4882a593Smuzhiyun 	port->flush	= ssi_flush;
1186*4882a593Smuzhiyun 	port->start_tx	= ssi_start_tx;
1187*4882a593Smuzhiyun 	port->stop_tx	= ssi_stop_tx;
1188*4882a593Smuzhiyun 	port->release	= ssi_release;
1189*4882a593Smuzhiyun 	hsi_port_set_drvdata(port, omap_port);
1190*4882a593Smuzhiyun 	omap_ssi->port[port_id] = omap_port;
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	platform_set_drvdata(pd, port);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
1195*4882a593Smuzhiyun 		&omap_port->sst_dma);
1196*4882a593Smuzhiyun 	if (err < 0)
1197*4882a593Smuzhiyun 		goto error;
1198*4882a593Smuzhiyun 	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
1199*4882a593Smuzhiyun 		&omap_port->ssr_dma);
1200*4882a593Smuzhiyun 	if (err < 0)
1201*4882a593Smuzhiyun 		goto error;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	err = ssi_port_irq(port, pd);
1204*4882a593Smuzhiyun 	if (err < 0)
1205*4882a593Smuzhiyun 		goto error;
1206*4882a593Smuzhiyun 	err = ssi_wake_irq(port, pd);
1207*4882a593Smuzhiyun 	if (err < 0)
1208*4882a593Smuzhiyun 		goto error;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	ssi_queues_init(omap_port);
1211*4882a593Smuzhiyun 	spin_lock_init(&omap_port->lock);
1212*4882a593Smuzhiyun 	spin_lock_init(&omap_port->wk_lock);
1213*4882a593Smuzhiyun 	omap_port->dev = &port->device;
1214*4882a593Smuzhiyun 
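	/* Autosuspend the port 250 ms after the last activity. */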
1215*4882a593Smuzhiyun 	pm_runtime_use_autosuspend(omap_port->pdev);
1216*4882a593Smuzhiyun 	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
1217*4882a593Smuzhiyun 	pm_runtime_enable(omap_port->pdev);
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
1220*4882a593Smuzhiyun 	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
1221*4882a593Smuzhiyun 	if (err < 0) {
1222*4882a593Smuzhiyun 		pm_runtime_disable(omap_port->pdev);
1223*4882a593Smuzhiyun 		goto error;
1224*4882a593Smuzhiyun 	}
1225*4882a593Smuzhiyun #endif
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	hsi_add_clients_from_dt(port, np);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	return 0;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun error:
1234*4882a593Smuzhiyun 	return err;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun static int ssi_port_remove(struct platform_device *pd)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun 	struct hsi_port *port = platform_get_drvdata(pd);
1240*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
1241*4882a593Smuzhiyun 	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
1242*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
1245*4882a593Smuzhiyun 	ssi_debug_remove_port(port);
1246*4882a593Smuzhiyun #endif
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	cancel_delayed_work_sync(&omap_port->errqueue_work);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	hsi_port_unregister_clients(port);
1251*4882a593Smuzhiyun 
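	/*
	 * Replace the port operations with no-op stubs so that any late
	 * caller sees an inert but still valid port during removal.
	 */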
1252*4882a593Smuzhiyun 	port->async	= hsi_dummy_msg;
1253*4882a593Smuzhiyun 	port->setup	= hsi_dummy_cl;
1254*4882a593Smuzhiyun 	port->flush	= hsi_dummy_cl;
1255*4882a593Smuzhiyun 	port->start_tx	= hsi_dummy_cl;
1256*4882a593Smuzhiyun 	port->stop_tx	= hsi_dummy_cl;
1257*4882a593Smuzhiyun 	port->release	= hsi_dummy_cl;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	omap_ssi->port[omap_port->port_id] = NULL;
1260*4882a593Smuzhiyun 	platform_set_drvdata(pd, NULL);
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	pm_runtime_dont_use_autosuspend(&pd->dev);
1263*4882a593Smuzhiyun 	pm_runtime_disable(&pd->dev);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	return 0;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun 	writel_relaxed(omap_port->sst.divisor,
1271*4882a593Smuzhiyun 				omap_port->sst_base + SSI_SST_DIVISOR_REG);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	return 0;
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
1277*4882a593Smuzhiyun 			       struct omap_ssi_port *omap_port)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun 	/* update divisor */
1280*4882a593Smuzhiyun 	u32 div = ssi_calculate_div(ssi);
1281*4882a593Smuzhiyun 	omap_port->sst.divisor = div;
1282*4882a593Smuzhiyun 	ssi_restore_divisor(omap_port);
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun #ifdef CONFIG_PM
1286*4882a593Smuzhiyun static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun 	struct hsi_port *port = to_hsi_port(omap_port->dev);
1289*4882a593Smuzhiyun 	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
1290*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
1293*4882a593Smuzhiyun 					SSI_MPU_ENABLE_REG(port->num, 0));
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	return 0;
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	struct hsi_port *port = to_hsi_port(omap_port->dev);
1301*4882a593Smuzhiyun 	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
1302*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1303*4882a593Smuzhiyun 	void __iomem	*base;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	writel_relaxed(omap_port->sys_mpu_enable,
1306*4882a593Smuzhiyun 			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/* SST context */
1309*4882a593Smuzhiyun 	base = omap_port->sst_base;
1310*4882a593Smuzhiyun 	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
1311*4882a593Smuzhiyun 	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
1312*4882a593Smuzhiyun 	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	/* SSR context */
1315*4882a593Smuzhiyun 	base = omap_port->ssr_base;
1316*4882a593Smuzhiyun 	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
1317*4882a593Smuzhiyun 	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
1318*4882a593Smuzhiyun 	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	return 0;
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
1324*4882a593Smuzhiyun {
1325*4882a593Smuzhiyun 	u32 mode;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	writel_relaxed(omap_port->sst.mode,
1328*4882a593Smuzhiyun 				omap_port->sst_base + SSI_SST_MODE_REG);
1329*4882a593Smuzhiyun 	writel_relaxed(omap_port->ssr.mode,
1330*4882a593Smuzhiyun 				omap_port->ssr_base + SSI_SSR_MODE_REG);
1331*4882a593Smuzhiyun 	/* OCP barrier: read back to make sure the mode writes have landed */
1332*4882a593Smuzhiyun 	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	return 0;
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun static int omap_ssi_port_runtime_suspend(struct device *dev)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	struct hsi_port *port = dev_get_drvdata(dev);
1340*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
1341*4882a593Smuzhiyun 	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
1342*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1343*4882a593Smuzhiyun 
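	/*
	 * Put the port into sleep mode and snapshot its context; the saved
	 * loss count lets the resume path skip a full context restore when
	 * the context was retained.
	 */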
1344*4882a593Smuzhiyun 	dev_dbg(dev, "port runtime suspend!\n");
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
1347*4882a593Smuzhiyun 	if (omap_ssi->get_loss)
1348*4882a593Smuzhiyun 		omap_port->loss_count =
1349*4882a593Smuzhiyun 				omap_ssi->get_loss(ssi->device.parent);
1350*4882a593Smuzhiyun 	ssi_save_port_ctx(omap_port);
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	return 0;
1353*4882a593Smuzhiyun }
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun static int omap_ssi_port_runtime_resume(struct device *dev)
1356*4882a593Smuzhiyun {
1357*4882a593Smuzhiyun 	struct hsi_port *port = dev_get_drvdata(dev);
1358*4882a593Smuzhiyun 	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
1359*4882a593Smuzhiyun 	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
1360*4882a593Smuzhiyun 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	dev_dbg(dev, "port runtime resume!\n");
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
1365*4882a593Smuzhiyun 				omap_ssi->get_loss(ssi->device.parent)))
1366*4882a593Smuzhiyun 		goto mode; /* We always need to restore the mode & TX divisor */
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	ssi_restore_port_ctx(omap_port);
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun mode:
1371*4882a593Smuzhiyun 	ssi_restore_divisor(omap_port);
1372*4882a593Smuzhiyun 	ssi_restore_port_mode(omap_port);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	return 0;
1375*4882a593Smuzhiyun }
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun static const struct dev_pm_ops omap_ssi_port_pm_ops = {
1378*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
1379*4882a593Smuzhiyun 		omap_ssi_port_runtime_resume, NULL)
1380*4882a593Smuzhiyun };
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun #define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
1383*4882a593Smuzhiyun #else
1384*4882a593Smuzhiyun #define DEV_PM_OPS     NULL
1385*4882a593Smuzhiyun #endif
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun #ifdef CONFIG_OF
1389*4882a593Smuzhiyun static const struct of_device_id omap_ssi_port_of_match[] = {
1390*4882a593Smuzhiyun 	{ .compatible = "ti,omap3-ssi-port", },
1391*4882a593Smuzhiyun 	{},
1392*4882a593Smuzhiyun };
1393*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
1394*4882a593Smuzhiyun #else
1395*4882a593Smuzhiyun #define omap_ssi_port_of_match NULL
1396*4882a593Smuzhiyun #endif
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun struct platform_driver ssi_port_pdriver = {
1399*4882a593Smuzhiyun 	.probe = ssi_port_probe,
1400*4882a593Smuzhiyun 	.remove	= ssi_port_remove,
1401*4882a593Smuzhiyun 	.driver	= {
1402*4882a593Smuzhiyun 		.name	= "omap_ssi_port",
1403*4882a593Smuzhiyun 		.of_match_table = omap_ssi_port_of_match,
1404*4882a593Smuzhiyun 		.pm	= DEV_PM_OPS,
1405*4882a593Smuzhiyun 	},
1406*4882a593Smuzhiyun };