xref: /OK3568_Linux_fs/kernel/drivers/tty/serial/8250/8250_omap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * 8250-core based driver for the OMAP internal UART
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * based on omap-serial.c, Copyright (C) 2010 Texas Instruments.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) 2014 Sebastian Andrzej Siewior
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/device.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/serial_8250.h>
16*4882a593Smuzhiyun #include <linux/serial_reg.h>
17*4882a593Smuzhiyun #include <linux/tty_flip.h>
18*4882a593Smuzhiyun #include <linux/platform_device.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/of.h>
21*4882a593Smuzhiyun #include <linux/of_device.h>
22*4882a593Smuzhiyun #include <linux/of_gpio.h>
23*4882a593Smuzhiyun #include <linux/of_irq.h>
24*4882a593Smuzhiyun #include <linux/delay.h>
25*4882a593Smuzhiyun #include <linux/pm_runtime.h>
26*4882a593Smuzhiyun #include <linux/console.h>
27*4882a593Smuzhiyun #include <linux/pm_qos.h>
28*4882a593Smuzhiyun #include <linux/pm_wakeirq.h>
29*4882a593Smuzhiyun #include <linux/dma-mapping.h>
30*4882a593Smuzhiyun #include <linux/sys_soc.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include "8250.h"
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #define DEFAULT_CLK_SPEED	48000000
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #define UART_ERRATA_i202_MDR1_ACCESS	(1 << 0)
37*4882a593Smuzhiyun #define OMAP_UART_WER_HAS_TX_WAKEUP	(1 << 1)
38*4882a593Smuzhiyun #define OMAP_DMA_TX_KICK		(1 << 2)
39*4882a593Smuzhiyun /*
40*4882a593Smuzhiyun  * See Advisory 21 in AM437x errata SPRZ408B, updated April 2015.
41*4882a593Smuzhiyun  * The same erratum is applicable to AM335x and DRA7x processors too.
42*4882a593Smuzhiyun  */
43*4882a593Smuzhiyun #define UART_ERRATA_CLOCK_DISABLE	(1 << 3)
44*4882a593Smuzhiyun #define	UART_HAS_EFR2			BIT(4)
45*4882a593Smuzhiyun #define UART_HAS_RHR_IT_DIS		BIT(5)
46*4882a593Smuzhiyun #define UART_RX_TIMEOUT_QUIRK		BIT(6)
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define OMAP_UART_FCR_RX_TRIG		6
49*4882a593Smuzhiyun #define OMAP_UART_FCR_TX_TRIG		4
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /* SCR register bitmasks */
52*4882a593Smuzhiyun #define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK	(1 << 7)
53*4882a593Smuzhiyun #define OMAP_UART_SCR_TX_TRIG_GRANU1_MASK	(1 << 6)
54*4882a593Smuzhiyun #define OMAP_UART_SCR_TX_EMPTY			(1 << 3)
55*4882a593Smuzhiyun #define OMAP_UART_SCR_DMAMODE_MASK		(3 << 1)
56*4882a593Smuzhiyun #define OMAP_UART_SCR_DMAMODE_1			(1 << 1)
57*4882a593Smuzhiyun #define OMAP_UART_SCR_DMAMODE_CTL		(1 << 0)
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun /* MVR register bitmasks */
60*4882a593Smuzhiyun #define OMAP_UART_MVR_SCHEME_SHIFT	30
61*4882a593Smuzhiyun #define OMAP_UART_LEGACY_MVR_MAJ_MASK	0xf0
62*4882a593Smuzhiyun #define OMAP_UART_LEGACY_MVR_MAJ_SHIFT	4
63*4882a593Smuzhiyun #define OMAP_UART_LEGACY_MVR_MIN_MASK	0x0f
64*4882a593Smuzhiyun #define OMAP_UART_MVR_MAJ_MASK		0x700
65*4882a593Smuzhiyun #define OMAP_UART_MVR_MAJ_SHIFT		8
66*4882a593Smuzhiyun #define OMAP_UART_MVR_MIN_MASK		0x3f
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /* SYSC register bitmasks */
69*4882a593Smuzhiyun #define OMAP_UART_SYSC_SOFTRESET	(1 << 1)
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /* SYSS register bitmasks */
72*4882a593Smuzhiyun #define OMAP_UART_SYSS_RESETDONE	(1 << 0)
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun #define UART_TI752_TLR_TX	0
75*4882a593Smuzhiyun #define UART_TI752_TLR_RX	4
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun #define TRIGGER_TLR_MASK(x)	((x & 0x3c) >> 2)
78*4882a593Smuzhiyun #define TRIGGER_FCR_MASK(x)	(x & 3)
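/*
 * Editorial note (not part of the original source): the 6-bit trigger level
 * is split across two registers by these macros, bits [5:2] going into TLR
 * and bits [1:0] into FCR. For example, RX_TRIGGER = 48 (0b110000) gives
 * TRIGGER_TLR_MASK(48) = 12 and TRIGGER_FCR_MASK(48) = 0, while
 * TX_TRIGGER = 1 gives TRIGGER_TLR_MASK(1) = 0 and TRIGGER_FCR_MASK(1) = 1.
 */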
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun /* Enable XON/XOFF flow control on output */
81*4882a593Smuzhiyun #define OMAP_UART_SW_TX		0x08
82*4882a593Smuzhiyun /* Enable XON/XOFF flow control on input */
83*4882a593Smuzhiyun #define OMAP_UART_SW_RX		0x02
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun #define OMAP_UART_WER_MOD_WKUP	0x7f
86*4882a593Smuzhiyun #define OMAP_UART_TX_WAKEUP_EN	(1 << 7)
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #define TX_TRIGGER	1
89*4882a593Smuzhiyun #define RX_TRIGGER	48
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun #define OMAP_UART_TCR_RESTORE(x)	((x / 4) << 4)
92*4882a593Smuzhiyun #define OMAP_UART_TCR_HALT(x)		((x / 4) << 0)
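/*
 * Editorial note, a worked example for the values used in
 * omap8250_restore_regs(): OMAP_UART_TCR_RESTORE(16) = (16 / 4) << 4 = 0x40
 * and OMAP_UART_TCR_HALT(52) = 52 / 4 = 0x0d, so TCR is written as 0x4d;
 * per the macro arithmetic both fields are in units of four characters.
 */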
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun #define UART_BUILD_REVISION(x, y)	(((x) << 8) | (y))
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #define OMAP_UART_REV_46 0x0406
97*4882a593Smuzhiyun #define OMAP_UART_REV_52 0x0502
98*4882a593Smuzhiyun #define OMAP_UART_REV_63 0x0603
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun /* Interrupt Enable Register 2 */
101*4882a593Smuzhiyun #define UART_OMAP_IER2			0x1B
102*4882a593Smuzhiyun #define UART_OMAP_IER2_RHR_IT_DIS	BIT(2)
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun /* Enhanced features register 2 */
105*4882a593Smuzhiyun #define UART_OMAP_EFR2			0x23
106*4882a593Smuzhiyun #define UART_OMAP_EFR2_TIMEOUT_BEHAVE	BIT(6)
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /* RX FIFO occupancy indicator */
109*4882a593Smuzhiyun #define UART_OMAP_RX_LVL		0x19
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun struct omap8250_priv {
112*4882a593Smuzhiyun 	int line;
113*4882a593Smuzhiyun 	u8 habit;
114*4882a593Smuzhiyun 	u8 mdr1;
115*4882a593Smuzhiyun 	u8 efr;
116*4882a593Smuzhiyun 	u8 scr;
117*4882a593Smuzhiyun 	u8 wer;
118*4882a593Smuzhiyun 	u8 xon;
119*4882a593Smuzhiyun 	u8 xoff;
120*4882a593Smuzhiyun 	u8 delayed_restore;
121*4882a593Smuzhiyun 	u16 quot;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	u8 tx_trigger;
124*4882a593Smuzhiyun 	u8 rx_trigger;
125*4882a593Smuzhiyun 	bool is_suspending;
126*4882a593Smuzhiyun 	int wakeirq;
127*4882a593Smuzhiyun 	int wakeups_enabled;
128*4882a593Smuzhiyun 	u32 latency;
129*4882a593Smuzhiyun 	u32 calc_latency;
130*4882a593Smuzhiyun 	struct pm_qos_request pm_qos_request;
131*4882a593Smuzhiyun 	struct work_struct qos_work;
132*4882a593Smuzhiyun 	struct uart_8250_dma omap8250_dma;
133*4882a593Smuzhiyun 	spinlock_t rx_dma_lock;
134*4882a593Smuzhiyun 	bool rx_dma_broken;
135*4882a593Smuzhiyun 	bool throttled;
136*4882a593Smuzhiyun };
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun struct omap8250_dma_params {
139*4882a593Smuzhiyun 	u32 rx_size;
140*4882a593Smuzhiyun 	u8 rx_trigger;
141*4882a593Smuzhiyun 	u8 tx_trigger;
142*4882a593Smuzhiyun };
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun struct omap8250_platdata {
145*4882a593Smuzhiyun 	struct omap8250_dma_params *dma_params;
146*4882a593Smuzhiyun 	u8 habit;
147*4882a593Smuzhiyun };
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_DMA
150*4882a593Smuzhiyun static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
151*4882a593Smuzhiyun #else
152*4882a593Smuzhiyun static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { }
153*4882a593Smuzhiyun #endif
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun static u32 uart_read(struct uart_8250_port *up, u32 reg)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun 	return readl(up->port.membase + (reg << up->port.regshift));
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun /*
161*4882a593Smuzhiyun  * Called on runtime PM resume path from omap8250_restore_regs(), and
162*4882a593Smuzhiyun  * omap8250_set_mctrl().
163*4882a593Smuzhiyun  */
164*4882a593Smuzhiyun static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
167*4882a593Smuzhiyun 	struct omap8250_priv *priv = up->port.private_data;
168*4882a593Smuzhiyun 	u8 lcr;
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	serial8250_do_set_mctrl(port, mctrl);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) {
173*4882a593Smuzhiyun 		/*
174*4882a593Smuzhiyun 		 * Turn off autoRTS if RTS is lowered and restore autoRTS
175*4882a593Smuzhiyun 		 * setting if RTS is raised
176*4882a593Smuzhiyun 		 */
177*4882a593Smuzhiyun 		lcr = serial_in(up, UART_LCR);
178*4882a593Smuzhiyun 		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
179*4882a593Smuzhiyun 		if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
180*4882a593Smuzhiyun 			priv->efr |= UART_EFR_RTS;
181*4882a593Smuzhiyun 		else
182*4882a593Smuzhiyun 			priv->efr &= ~UART_EFR_RTS;
183*4882a593Smuzhiyun 		serial_out(up, UART_EFR, priv->efr);
184*4882a593Smuzhiyun 		serial_out(up, UART_LCR, lcr);
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	int err;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	err = pm_runtime_resume_and_get(port->dev);
193*4882a593Smuzhiyun 	if (err)
194*4882a593Smuzhiyun 		return;
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	__omap8250_set_mctrl(port, mctrl);
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
199*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun /*
203*4882a593Smuzhiyun  * Work around for errata i202 (2430, 3430, 3630, 4430 and 4460):
204*4882a593Smuzhiyun  * accessing a UART register right after an MDR1 access
205*4882a593Smuzhiyun  * can cause the UART to corrupt data.
206*4882a593Smuzhiyun  *
207*4882a593Smuzhiyun  * The needed delay is
208*4882a593Smuzhiyun  * 5 L4 clock cycles + 5 UART functional clock cycles (~0.2us @48MHz);
209*4882a593Smuzhiyun  * give 10 times as much.
210*4882a593Smuzhiyun  */
211*4882a593Smuzhiyun static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
212*4882a593Smuzhiyun 				     struct omap8250_priv *priv)
213*4882a593Smuzhiyun {
214*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_MDR1, priv->mdr1);
215*4882a593Smuzhiyun 	udelay(2);
216*4882a593Smuzhiyun 	serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
217*4882a593Smuzhiyun 			UART_FCR_CLEAR_RCVR);
218*4882a593Smuzhiyun }
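/*
 * Editorial note: the numbers behind the delay above. At a 48 MHz functional
 * clock, 5 + 5 clock cycles is roughly 10 / 48 MHz ~= 0.2 us; ten times that
 * is about 2 us, hence the udelay(2).
 */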
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
221*4882a593Smuzhiyun 				  struct omap8250_priv *priv)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun 	unsigned int uartclk = port->uartclk;
224*4882a593Smuzhiyun 	unsigned int div_13, div_16;
225*4882a593Smuzhiyun 	unsigned int abs_d13, abs_d16;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	/*
228*4882a593Smuzhiyun 	 * Old custom speed handling.
229*4882a593Smuzhiyun 	 */
230*4882a593Smuzhiyun 	if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
231*4882a593Smuzhiyun 		priv->quot = port->custom_divisor & UART_DIV_MAX;
232*4882a593Smuzhiyun 		/*
233*4882a593Smuzhiyun 		 * I assume that nobody is using this. But hey, if somebody
234*4882a593Smuzhiyun 		 * would like to specify the divisor _and_ the mode then the
235*4882a593Smuzhiyun 		 * driver is ready and waiting for it.
236*4882a593Smuzhiyun 		 */
237*4882a593Smuzhiyun 		if (port->custom_divisor & (1 << 16))
238*4882a593Smuzhiyun 			priv->mdr1 = UART_OMAP_MDR1_13X_MODE;
239*4882a593Smuzhiyun 		else
240*4882a593Smuzhiyun 			priv->mdr1 = UART_OMAP_MDR1_16X_MODE;
241*4882a593Smuzhiyun 		return;
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 	div_13 = DIV_ROUND_CLOSEST(uartclk, 13 * baud);
244*4882a593Smuzhiyun 	div_16 = DIV_ROUND_CLOSEST(uartclk, 16 * baud);
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	if (!div_13)
247*4882a593Smuzhiyun 		div_13 = 1;
248*4882a593Smuzhiyun 	if (!div_16)
249*4882a593Smuzhiyun 		div_16 = 1;
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	abs_d13 = abs(baud - uartclk / 13 / div_13);
252*4882a593Smuzhiyun 	abs_d16 = abs(baud - uartclk / 16 / div_16);
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	if (abs_d13 >= abs_d16) {
255*4882a593Smuzhiyun 		priv->mdr1 = UART_OMAP_MDR1_16X_MODE;
256*4882a593Smuzhiyun 		priv->quot = div_16;
257*4882a593Smuzhiyun 	} else {
258*4882a593Smuzhiyun 		priv->mdr1 = UART_OMAP_MDR1_13X_MODE;
259*4882a593Smuzhiyun 		priv->quot = div_13;
260*4882a593Smuzhiyun 	}
261*4882a593Smuzhiyun }
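/*
 * Editorial note, a worked example assuming the default 48 MHz uartclk and
 * baud = 460800: div_16 = DIV_ROUND_CLOSEST(48000000, 16 * 460800) = 7, which
 * yields 48000000 / 16 / 7 ~= 428571 Bd (about 7% error), while
 * div_13 = DIV_ROUND_CLOSEST(48000000, 13 * 460800) = 8, which yields
 * 48000000 / 13 / 8 ~= 461538 Bd (about 0.16% error). The function therefore
 * selects 13x mode with quot = 8.
 */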
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun static void omap8250_update_scr(struct uart_8250_port *up,
264*4882a593Smuzhiyun 				struct omap8250_priv *priv)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	u8 old_scr;
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	old_scr = serial_in(up, UART_OMAP_SCR);
269*4882a593Smuzhiyun 	if (old_scr == priv->scr)
270*4882a593Smuzhiyun 		return;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	/*
273*4882a593Smuzhiyun 	 * The manual recommends that, when the DMA mode is selected via the SCR
274*4882a593Smuzhiyun 	 * register (instead of the FCR), the mode selector and the DMA mode must
275*4882a593Smuzhiyun 	 * not be set in a single register write, as this may lead to malfunction.
276*4882a593Smuzhiyun 	 */
277*4882a593Smuzhiyun 	if (priv->scr & OMAP_UART_SCR_DMAMODE_MASK)
278*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_SCR,
279*4882a593Smuzhiyun 			   priv->scr & ~OMAP_UART_SCR_DMAMODE_MASK);
280*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_SCR, priv->scr);
281*4882a593Smuzhiyun }
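/*
 * Editorial note: with DMA enabled, omap_8250_set_termios() builds
 * priv->scr = 0xcb (RX/TX_TRIG_GRANU1 | TX_EMPTY | DMAMODE_1 | DMAMODE_CTL).
 * The function above then writes 0xc9 first (DMAMODE bits masked out) and
 * 0xcb second, so the DMA mode selector and the DMA mode are never enabled
 * in a single register write.
 */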
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun static void omap8250_update_mdr1(struct uart_8250_port *up,
284*4882a593Smuzhiyun 				 struct omap8250_priv *priv)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
287*4882a593Smuzhiyun 		omap_8250_mdr1_errataset(up, priv);
288*4882a593Smuzhiyun 	else
289*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_MDR1, priv->mdr1);
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun static void omap8250_restore_regs(struct uart_8250_port *up)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	struct omap8250_priv *priv = up->port.private_data;
295*4882a593Smuzhiyun 	struct uart_8250_dma	*dma = up->dma;
296*4882a593Smuzhiyun 	u8 mcr = serial8250_in_MCR(up);
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	if (dma && dma->tx_running) {
299*4882a593Smuzhiyun 		/*
300*4882a593Smuzhiyun 		 * TCSANOW requests the change to occur immediately however if
301*4882a593Smuzhiyun 		 * we have a TX-DMA operation in progress then it has been
302*4882a593Smuzhiyun 		 * observed that it might stall and never complete. Therefore we
303*4882a593Smuzhiyun 		 * delay the restore until the DMA completes to prevent this hang.
304*4882a593Smuzhiyun 		 */
305*4882a593Smuzhiyun 		priv->delayed_restore = 1;
306*4882a593Smuzhiyun 		return;
307*4882a593Smuzhiyun 	}
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
310*4882a593Smuzhiyun 	serial_out(up, UART_EFR, UART_EFR_ECB);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
313*4882a593Smuzhiyun 	serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR);
314*4882a593Smuzhiyun 	serial_out(up, UART_FCR, up->fcr);
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	omap8250_update_scr(up, priv);
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_RESTORE(16) |
321*4882a593Smuzhiyun 			OMAP_UART_TCR_HALT(52));
322*4882a593Smuzhiyun 	serial_out(up, UART_TI752_TLR,
323*4882a593Smuzhiyun 		   TRIGGER_TLR_MASK(priv->tx_trigger) << UART_TI752_TLR_TX |
324*4882a593Smuzhiyun 		   TRIGGER_TLR_MASK(priv->rx_trigger) << UART_TI752_TLR_RX);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	serial_out(up, UART_LCR, 0);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	/* drop TCR + TLR access, we setup XON/XOFF later */
329*4882a593Smuzhiyun 	serial8250_out_MCR(up, mcr);
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	serial_out(up, UART_IER, up->ier);
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
334*4882a593Smuzhiyun 	serial_dl_write(up, priv->quot);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	serial_out(up, UART_EFR, priv->efr);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	/* Configure flow control */
339*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
340*4882a593Smuzhiyun 	serial_out(up, UART_XON1, priv->xon);
341*4882a593Smuzhiyun 	serial_out(up, UART_XOFF1, priv->xoff);
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	serial_out(up, UART_LCR, up->lcr);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	omap8250_update_mdr1(up, priv);
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	__omap8250_set_mctrl(&up->port, up->port.mctrl);
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 	if (up->port.rs485.flags & SER_RS485_ENABLED)
350*4882a593Smuzhiyun 		serial8250_em485_stop_tx(up);
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun /*
354*4882a593Smuzhiyun  * OMAP can use "CLK / (16 or 13) / div" for baud rate. And then we have
355*4882a593Smuzhiyun  * some differences in how we want to handle flow control.
356*4882a593Smuzhiyun  */
357*4882a593Smuzhiyun static void omap_8250_set_termios(struct uart_port *port,
358*4882a593Smuzhiyun 				  struct ktermios *termios,
359*4882a593Smuzhiyun 				  struct ktermios *old)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
362*4882a593Smuzhiyun 	struct omap8250_priv *priv = up->port.private_data;
363*4882a593Smuzhiyun 	unsigned char cval = 0;
364*4882a593Smuzhiyun 	unsigned int baud;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	switch (termios->c_cflag & CSIZE) {
367*4882a593Smuzhiyun 	case CS5:
368*4882a593Smuzhiyun 		cval = UART_LCR_WLEN5;
369*4882a593Smuzhiyun 		break;
370*4882a593Smuzhiyun 	case CS6:
371*4882a593Smuzhiyun 		cval = UART_LCR_WLEN6;
372*4882a593Smuzhiyun 		break;
373*4882a593Smuzhiyun 	case CS7:
374*4882a593Smuzhiyun 		cval = UART_LCR_WLEN7;
375*4882a593Smuzhiyun 		break;
376*4882a593Smuzhiyun 	default:
377*4882a593Smuzhiyun 	case CS8:
378*4882a593Smuzhiyun 		cval = UART_LCR_WLEN8;
379*4882a593Smuzhiyun 		break;
380*4882a593Smuzhiyun 	}
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	if (termios->c_cflag & CSTOPB)
383*4882a593Smuzhiyun 		cval |= UART_LCR_STOP;
384*4882a593Smuzhiyun 	if (termios->c_cflag & PARENB)
385*4882a593Smuzhiyun 		cval |= UART_LCR_PARITY;
386*4882a593Smuzhiyun 	if (!(termios->c_cflag & PARODD))
387*4882a593Smuzhiyun 		cval |= UART_LCR_EPAR;
388*4882a593Smuzhiyun 	if (termios->c_cflag & CMSPAR)
389*4882a593Smuzhiyun 		cval |= UART_LCR_SPAR;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	/*
392*4882a593Smuzhiyun 	 * Ask the core to calculate the divisor for us.
393*4882a593Smuzhiyun 	 */
394*4882a593Smuzhiyun 	baud = uart_get_baud_rate(port, termios, old,
395*4882a593Smuzhiyun 				  port->uartclk / 16 / UART_DIV_MAX,
396*4882a593Smuzhiyun 				  port->uartclk / 13);
397*4882a593Smuzhiyun 	omap_8250_get_divisor(port, baud, priv);
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	/*
400*4882a593Smuzhiyun 	 * Ok, we're now changing the port state. Do it with
401*4882a593Smuzhiyun 	 * interrupts disabled.
402*4882a593Smuzhiyun 	 */
403*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
404*4882a593Smuzhiyun 	spin_lock_irq(&port->lock);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	/*
407*4882a593Smuzhiyun 	 * Update the per-port timeout.
408*4882a593Smuzhiyun 	 */
409*4882a593Smuzhiyun 	uart_update_timeout(port, termios->c_cflag, baud);
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
412*4882a593Smuzhiyun 	if (termios->c_iflag & INPCK)
413*4882a593Smuzhiyun 		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
414*4882a593Smuzhiyun 	if (termios->c_iflag & (IGNBRK | PARMRK))
415*4882a593Smuzhiyun 		up->port.read_status_mask |= UART_LSR_BI;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	/*
418*4882a593Smuzhiyun 	 * Characters to ignore
419*4882a593Smuzhiyun 	 */
420*4882a593Smuzhiyun 	up->port.ignore_status_mask = 0;
421*4882a593Smuzhiyun 	if (termios->c_iflag & IGNPAR)
422*4882a593Smuzhiyun 		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
423*4882a593Smuzhiyun 	if (termios->c_iflag & IGNBRK) {
424*4882a593Smuzhiyun 		up->port.ignore_status_mask |= UART_LSR_BI;
425*4882a593Smuzhiyun 		/*
426*4882a593Smuzhiyun 		 * If we're ignoring parity and break indicators,
427*4882a593Smuzhiyun 		 * ignore overruns too (for real raw support).
428*4882a593Smuzhiyun 		 */
429*4882a593Smuzhiyun 		if (termios->c_iflag & IGNPAR)
430*4882a593Smuzhiyun 			up->port.ignore_status_mask |= UART_LSR_OE;
431*4882a593Smuzhiyun 	}
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	/*
434*4882a593Smuzhiyun 	 * ignore all characters if CREAD is not set
435*4882a593Smuzhiyun 	 */
436*4882a593Smuzhiyun 	if ((termios->c_cflag & CREAD) == 0)
437*4882a593Smuzhiyun 		up->port.ignore_status_mask |= UART_LSR_DR;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	/*
440*4882a593Smuzhiyun 	 * Modem status interrupts
441*4882a593Smuzhiyun 	 */
442*4882a593Smuzhiyun 	up->ier &= ~UART_IER_MSI;
443*4882a593Smuzhiyun 	if (UART_ENABLE_MS(&up->port, termios->c_cflag))
444*4882a593Smuzhiyun 		up->ier |= UART_IER_MSI;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	up->lcr = cval;
447*4882a593Smuzhiyun 	/* Up to here it was mostly serial8250_do_set_termios() */
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	/*
450*4882a593Smuzhiyun 	 * We enable TRIG_GRANU for RX and TX and additionally we set
451*4882a593Smuzhiyun 	 * SCR_TX_EMPTY bit. The result is the following:
452*4882a593Smuzhiyun 	 * - RX_TRIGGER amount of bytes in the FIFO will cause an interrupt.
453*4882a593Smuzhiyun 	 * - less than RX_TRIGGER number of bytes will also cause an interrupt
454*4882a593Smuzhiyun 	 *   once the UART decides that there are no new bytes arriving.
455*4882a593Smuzhiyun 	 * - Once THRE is enabled, the interrupt will be fired once the FIFO is
456*4882a593Smuzhiyun 	 *   empty - the trigger level is ignored here.
457*4882a593Smuzhiyun 	 *
458*4882a593Smuzhiyun 	 * Once DMA is enabled:
459*4882a593Smuzhiyun 	 * - UART will assert the TX DMA line once there is room for TX_TRIGGER
460*4882a593Smuzhiyun 	 *   bytes in the TX FIFO. On each assert the DMA engine will move
461*4882a593Smuzhiyun 	 *   TX_TRIGGER bytes into the FIFO.
462*4882a593Smuzhiyun 	 * - UART will assert the RX DMA line once there are RX_TRIGGER bytes in
463*4882a593Smuzhiyun 	 *   the FIFO and move RX_TRIGGER bytes.
464*4882a593Smuzhiyun 	 * This is because threshold and trigger values are the same.
465*4882a593Smuzhiyun 	 */
466*4882a593Smuzhiyun 	up->fcr = UART_FCR_ENABLE_FIFO;
467*4882a593Smuzhiyun 	up->fcr |= TRIGGER_FCR_MASK(priv->tx_trigger) << OMAP_UART_FCR_TX_TRIG;
468*4882a593Smuzhiyun 	up->fcr |= TRIGGER_FCR_MASK(priv->rx_trigger) << OMAP_UART_FCR_RX_TRIG;
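	/*
	 * Editorial note: with the default TX_TRIGGER = 1 and RX_TRIGGER = 48,
	 * the three lines above evaluate to up->fcr = 0x11: FIFO enable (0x01)
	 * plus the TX trigger LSB field (1 << 4); the RX trigger LSBs are 0 and
	 * the upper trigger bits end up in TLR via omap8250_restore_regs().
	 */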
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	priv->scr = OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | OMAP_UART_SCR_TX_EMPTY |
471*4882a593Smuzhiyun 		OMAP_UART_SCR_TX_TRIG_GRANU1_MASK;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	if (up->dma)
474*4882a593Smuzhiyun 		priv->scr |= OMAP_UART_SCR_DMAMODE_1 |
475*4882a593Smuzhiyun 			OMAP_UART_SCR_DMAMODE_CTL;
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	priv->xon = termios->c_cc[VSTART];
478*4882a593Smuzhiyun 	priv->xoff = termios->c_cc[VSTOP];
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	priv->efr = 0;
481*4882a593Smuzhiyun 	up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW &&
484*4882a593Smuzhiyun 	    !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) &&
485*4882a593Smuzhiyun 	    !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) {
486*4882a593Smuzhiyun 		/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
487*4882a593Smuzhiyun 		up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
488*4882a593Smuzhiyun 		priv->efr |= UART_EFR_CTS;
489*4882a593Smuzhiyun 	} else	if (up->port.flags & UPF_SOFT_FLOW) {
490*4882a593Smuzhiyun 		/*
491*4882a593Smuzhiyun 		 * OMAP rx s/w flow control is borked; the transmitter remains
492*4882a593Smuzhiyun 		 * stuck off even if rx flow control is subsequently disabled
493*4882a593Smuzhiyun 		 */
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 		/*
496*4882a593Smuzhiyun 		 * IXOFF Flag:
497*4882a593Smuzhiyun 		 * Enable XON/XOFF flow control on output.
498*4882a593Smuzhiyun 		 * Transmit XON1, XOFF1
499*4882a593Smuzhiyun 		 */
500*4882a593Smuzhiyun 		if (termios->c_iflag & IXOFF) {
501*4882a593Smuzhiyun 			up->port.status |= UPSTAT_AUTOXOFF;
502*4882a593Smuzhiyun 			priv->efr |= OMAP_UART_SW_TX;
503*4882a593Smuzhiyun 		}
504*4882a593Smuzhiyun 	}
505*4882a593Smuzhiyun 	omap8250_restore_regs(up);
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	spin_unlock_irq(&up->port.lock);
508*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
509*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	/* calculate wakeup latency constraint */
512*4882a593Smuzhiyun 	priv->calc_latency = USEC_PER_SEC * 64 * 8 / baud;
513*4882a593Smuzhiyun 	priv->latency = priv->calc_latency;
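	/*
	 * Editorial note: at 115200 Bd, for instance, the expression above is
	 * 1000000 * 64 * 8 / 115200 ~= 4444 us of tolerated wakeup latency.
	 */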
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	schedule_work(&priv->qos_work);
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun 	/* Don't rewrite B0 */
518*4882a593Smuzhiyun 	if (tty_termios_baud_rate(termios))
519*4882a593Smuzhiyun 		tty_termios_encode_baud_rate(termios, baud, baud);
520*4882a593Smuzhiyun }
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun /* same as 8250 except that we may have extra flow bits set in EFR */
523*4882a593Smuzhiyun static void omap_8250_pm(struct uart_port *port, unsigned int state,
524*4882a593Smuzhiyun 			 unsigned int oldstate)
525*4882a593Smuzhiyun {
526*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
527*4882a593Smuzhiyun 	u8 efr;
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
530*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
531*4882a593Smuzhiyun 	efr = serial_in(up, UART_EFR);
532*4882a593Smuzhiyun 	serial_out(up, UART_EFR, efr | UART_EFR_ECB);
533*4882a593Smuzhiyun 	serial_out(up, UART_LCR, 0);
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
536*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
537*4882a593Smuzhiyun 	serial_out(up, UART_EFR, efr);
538*4882a593Smuzhiyun 	serial_out(up, UART_LCR, 0);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
541*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
542*4882a593Smuzhiyun }
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
545*4882a593Smuzhiyun 					      struct omap8250_priv *priv)
546*4882a593Smuzhiyun {
547*4882a593Smuzhiyun 	const struct soc_device_attribute k3_soc_devices[] = {
548*4882a593Smuzhiyun 		{ .family = "AM65X",  },
549*4882a593Smuzhiyun 		{ .family = "J721E", .revision = "SR1.0" },
550*4882a593Smuzhiyun 		{ /* sentinel */ }
551*4882a593Smuzhiyun 	};
552*4882a593Smuzhiyun 	u32 mvr, scheme;
553*4882a593Smuzhiyun 	u16 revision, major, minor;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	mvr = uart_read(up, UART_OMAP_MVER);
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	/* Check revision register scheme */
558*4882a593Smuzhiyun 	scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
559*4882a593Smuzhiyun 
560*4882a593Smuzhiyun 	switch (scheme) {
561*4882a593Smuzhiyun 	case 0: /* Legacy Scheme: OMAP2/3 */
562*4882a593Smuzhiyun 		/* MINOR_REV[0:4], MAJOR_REV[4:7] */
563*4882a593Smuzhiyun 		major = (mvr & OMAP_UART_LEGACY_MVR_MAJ_MASK) >>
564*4882a593Smuzhiyun 			OMAP_UART_LEGACY_MVR_MAJ_SHIFT;
565*4882a593Smuzhiyun 		minor = (mvr & OMAP_UART_LEGACY_MVR_MIN_MASK);
566*4882a593Smuzhiyun 		break;
567*4882a593Smuzhiyun 	case 1:
568*4882a593Smuzhiyun 		/* New Scheme: OMAP4+ */
569*4882a593Smuzhiyun 		/* MINOR_REV[0:5], MAJOR_REV[8:10] */
570*4882a593Smuzhiyun 		major = (mvr & OMAP_UART_MVR_MAJ_MASK) >>
571*4882a593Smuzhiyun 			OMAP_UART_MVR_MAJ_SHIFT;
572*4882a593Smuzhiyun 		minor = (mvr & OMAP_UART_MVR_MIN_MASK);
573*4882a593Smuzhiyun 		break;
574*4882a593Smuzhiyun 	default:
575*4882a593Smuzhiyun 		dev_warn(up->port.dev,
576*4882a593Smuzhiyun 			 "Unknown revision, defaulting to highest\n");
577*4882a593Smuzhiyun 		/* highest possible revision */
578*4882a593Smuzhiyun 		major = 0xff;
579*4882a593Smuzhiyun 		minor = 0xff;
580*4882a593Smuzhiyun 	}
581*4882a593Smuzhiyun 	/* normalize revision for the driver */
582*4882a593Smuzhiyun 	revision = UART_BUILD_REVISION(major, minor);
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	switch (revision) {
585*4882a593Smuzhiyun 	case OMAP_UART_REV_46:
586*4882a593Smuzhiyun 		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS;
587*4882a593Smuzhiyun 		break;
588*4882a593Smuzhiyun 	case OMAP_UART_REV_52:
589*4882a593Smuzhiyun 		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
590*4882a593Smuzhiyun 				OMAP_UART_WER_HAS_TX_WAKEUP;
591*4882a593Smuzhiyun 		break;
592*4882a593Smuzhiyun 	case OMAP_UART_REV_63:
593*4882a593Smuzhiyun 		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
594*4882a593Smuzhiyun 			OMAP_UART_WER_HAS_TX_WAKEUP;
595*4882a593Smuzhiyun 		break;
596*4882a593Smuzhiyun 	default:
597*4882a593Smuzhiyun 		break;
598*4882a593Smuzhiyun 	}
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 	/*
601*4882a593Smuzhiyun 	 * AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
602*4882a593Smuzhiyun 	 * have the RHR_IT_DIS bit in the IER2 register. So drop the flag
603*4882a593Smuzhiyun 	 * to enable the errata workaround.
604*4882a593Smuzhiyun 	 */
605*4882a593Smuzhiyun 	if (soc_device_match(k3_soc_devices))
606*4882a593Smuzhiyun 		priv->habit &= ~UART_HAS_RHR_IT_DIS;
607*4882a593Smuzhiyun }
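/*
 * Editorial note, a decoding example with a made-up MVR value: mvr =
 * 0x40000603 gives scheme = 1 (OMAP4+ layout), major = (mvr & 0x700) >> 8 = 6
 * and minor = mvr & 0x3f = 3, so revision = 0x0603 = OMAP_UART_REV_63 and the
 * i202 plus TX wakeup habits get set above.
 */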
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun static void omap8250_uart_qos_work(struct work_struct *work)
610*4882a593Smuzhiyun {
611*4882a593Smuzhiyun 	struct omap8250_priv *priv;
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	priv = container_of(work, struct omap8250_priv, qos_work);
614*4882a593Smuzhiyun 	cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency);
615*4882a593Smuzhiyun }
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_DMA
618*4882a593Smuzhiyun static int omap_8250_dma_handle_irq(struct uart_port *port);
619*4882a593Smuzhiyun #endif
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun static irqreturn_t omap8250_irq(int irq, void *dev_id)
622*4882a593Smuzhiyun {
623*4882a593Smuzhiyun 	struct uart_port *port = dev_id;
624*4882a593Smuzhiyun 	struct omap8250_priv *priv = port->private_data;
625*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
626*4882a593Smuzhiyun 	unsigned int iir, lsr;
627*4882a593Smuzhiyun 	int ret;
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_DMA
630*4882a593Smuzhiyun 	if (up->dma) {
631*4882a593Smuzhiyun 		ret = omap_8250_dma_handle_irq(port);
632*4882a593Smuzhiyun 		return IRQ_RETVAL(ret);
633*4882a593Smuzhiyun 	}
634*4882a593Smuzhiyun #endif
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 	serial8250_rpm_get(up);
637*4882a593Smuzhiyun 	lsr = serial_port_in(port, UART_LSR);
638*4882a593Smuzhiyun 	iir = serial_port_in(port, UART_IIR);
639*4882a593Smuzhiyun 	ret = serial8250_handle_irq(port, iir);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	/*
642*4882a593Smuzhiyun 	 * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
643*4882a593Smuzhiyun 	 * FIFO has been drained, in which case a dummy read of RX FIFO
644*4882a593Smuzhiyun 	 * is required to clear RX TIMEOUT condition.
645*4882a593Smuzhiyun 	 */
646*4882a593Smuzhiyun 	if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
647*4882a593Smuzhiyun 	    (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
648*4882a593Smuzhiyun 	    serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
649*4882a593Smuzhiyun 		serial_port_in(port, UART_RX);
650*4882a593Smuzhiyun 	}
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	/* Stop processing interrupts on input overrun */
653*4882a593Smuzhiyun 	if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
654*4882a593Smuzhiyun 		unsigned long delay;
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 		up->ier = port->serial_in(port, UART_IER);
657*4882a593Smuzhiyun 		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
658*4882a593Smuzhiyun 			port->ops->stop_rx(port);
659*4882a593Smuzhiyun 		} else {
660*4882a593Smuzhiyun 			/* Keep restarting the timer until
661*4882a593Smuzhiyun 			 * the input overrun subsides.
662*4882a593Smuzhiyun 			 */
663*4882a593Smuzhiyun 			cancel_delayed_work(&up->overrun_backoff);
664*4882a593Smuzhiyun 		}
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
667*4882a593Smuzhiyun 		schedule_delayed_work(&up->overrun_backoff, delay);
668*4882a593Smuzhiyun 	}
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	serial8250_rpm_put(up);
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	return IRQ_RETVAL(ret);
673*4882a593Smuzhiyun }
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun static int omap_8250_startup(struct uart_port *port)
676*4882a593Smuzhiyun {
677*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
678*4882a593Smuzhiyun 	struct omap8250_priv *priv = port->private_data;
679*4882a593Smuzhiyun 	int ret;
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	if (priv->wakeirq) {
682*4882a593Smuzhiyun 		ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq);
683*4882a593Smuzhiyun 		if (ret)
684*4882a593Smuzhiyun 			return ret;
685*4882a593Smuzhiyun 	}
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun 	serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 	serial_out(up, UART_LCR, UART_LCR_WLEN8);
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun 	up->lsr_saved_flags = 0;
694*4882a593Smuzhiyun 	up->msr_saved_flags = 0;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	/* Disable DMA for console UART */
697*4882a593Smuzhiyun 	if (uart_console(port))
698*4882a593Smuzhiyun 		up->dma = NULL;
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	if (up->dma) {
701*4882a593Smuzhiyun 		ret = serial8250_request_dma(up);
702*4882a593Smuzhiyun 		if (ret) {
703*4882a593Smuzhiyun 			dev_warn_ratelimited(port->dev,
704*4882a593Smuzhiyun 					     "failed to request DMA\n");
705*4882a593Smuzhiyun 			up->dma = NULL;
706*4882a593Smuzhiyun 		}
707*4882a593Smuzhiyun 	}
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
710*4882a593Smuzhiyun 			  dev_name(port->dev), port);
711*4882a593Smuzhiyun 	if (ret < 0)
712*4882a593Smuzhiyun 		goto err;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	up->ier = UART_IER_RLSI | UART_IER_RDI;
715*4882a593Smuzhiyun 	serial_out(up, UART_IER, up->ier);
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun #ifdef CONFIG_PM
718*4882a593Smuzhiyun 	up->capabilities |= UART_CAP_RPM;
719*4882a593Smuzhiyun #endif
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	/* Enable module level wake up */
722*4882a593Smuzhiyun 	priv->wer = OMAP_UART_WER_MOD_WKUP;
723*4882a593Smuzhiyun 	if (priv->habit & OMAP_UART_WER_HAS_TX_WAKEUP)
724*4882a593Smuzhiyun 		priv->wer |= OMAP_UART_TX_WAKEUP_EN;
725*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_WER, priv->wer);
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	if (up->dma && !(priv->habit & UART_HAS_EFR2))
728*4882a593Smuzhiyun 		up->dma->rx_dma(up);
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
731*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
732*4882a593Smuzhiyun 	return 0;
733*4882a593Smuzhiyun err:
734*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
735*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
736*4882a593Smuzhiyun 	dev_pm_clear_wake_irq(port->dev);
737*4882a593Smuzhiyun 	return ret;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun static void omap_8250_shutdown(struct uart_port *port)
741*4882a593Smuzhiyun {
742*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
743*4882a593Smuzhiyun 	struct omap8250_priv *priv = port->private_data;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	flush_work(&priv->qos_work);
746*4882a593Smuzhiyun 	if (up->dma)
747*4882a593Smuzhiyun 		omap_8250_rx_dma_flush(up);
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_WER, 0);
752*4882a593Smuzhiyun 	if (priv->habit & UART_HAS_EFR2)
753*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_EFR2, 0x0);
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 	up->ier = 0;
756*4882a593Smuzhiyun 	serial_out(up, UART_IER, 0);
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 	if (up->dma)
759*4882a593Smuzhiyun 		serial8250_release_dma(up);
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	/*
762*4882a593Smuzhiyun 	 * Disable break condition and FIFOs
763*4882a593Smuzhiyun 	 */
764*4882a593Smuzhiyun 	if (up->lcr & UART_LCR_SBC)
765*4882a593Smuzhiyun 		serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
766*4882a593Smuzhiyun 	serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
769*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
770*4882a593Smuzhiyun 	free_irq(port->irq, port);
771*4882a593Smuzhiyun 	dev_pm_clear_wake_irq(port->dev);
772*4882a593Smuzhiyun }
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun static void omap_8250_throttle(struct uart_port *port)
775*4882a593Smuzhiyun {
776*4882a593Smuzhiyun 	struct omap8250_priv *priv = port->private_data;
777*4882a593Smuzhiyun 	unsigned long flags;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	spin_lock_irqsave(&port->lock, flags);
782*4882a593Smuzhiyun 	port->ops->stop_rx(port);
783*4882a593Smuzhiyun 	priv->throttled = true;
784*4882a593Smuzhiyun 	spin_unlock_irqrestore(&port->lock, flags);
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
787*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun static void omap_8250_unthrottle(struct uart_port *port)
791*4882a593Smuzhiyun {
792*4882a593Smuzhiyun 	struct omap8250_priv *priv = port->private_data;
793*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
794*4882a593Smuzhiyun 	unsigned long flags;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	pm_runtime_get_sync(port->dev);
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	spin_lock_irqsave(&port->lock, flags);
799*4882a593Smuzhiyun 	priv->throttled = false;
800*4882a593Smuzhiyun 	if (up->dma)
801*4882a593Smuzhiyun 		up->dma->rx_dma(up);
802*4882a593Smuzhiyun 	up->ier |= UART_IER_RLSI | UART_IER_RDI;
803*4882a593Smuzhiyun 	port->read_status_mask |= UART_LSR_DR;
804*4882a593Smuzhiyun 	serial_out(up, UART_IER, up->ier);
805*4882a593Smuzhiyun 	spin_unlock_irqrestore(&port->lock, flags);
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(port->dev);
808*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(port->dev);
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_DMA
812*4882a593Smuzhiyun static int omap_8250_rx_dma(struct uart_8250_port *p);
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun /* Must be called while priv->rx_dma_lock is held */
815*4882a593Smuzhiyun static void __dma_rx_do_complete(struct uart_8250_port *p)
816*4882a593Smuzhiyun {
817*4882a593Smuzhiyun 	struct uart_8250_dma    *dma = p->dma;
818*4882a593Smuzhiyun 	struct tty_port         *tty_port = &p->port.state->port;
819*4882a593Smuzhiyun 	struct omap8250_priv	*priv = p->port.private_data;
820*4882a593Smuzhiyun 	struct dma_chan		*rxchan = dma->rxchan;
821*4882a593Smuzhiyun 	dma_cookie_t		cookie;
822*4882a593Smuzhiyun 	struct dma_tx_state     state;
823*4882a593Smuzhiyun 	int                     count;
824*4882a593Smuzhiyun 	int			ret;
825*4882a593Smuzhiyun 	u32			reg;
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	if (!dma->rx_running)
828*4882a593Smuzhiyun 		goto out;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	cookie = dma->rx_cookie;
831*4882a593Smuzhiyun 	dma->rx_running = 0;
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	/* Re-enable RX FIFO interrupt now that transfer is complete */
834*4882a593Smuzhiyun 	if (priv->habit & UART_HAS_RHR_IT_DIS) {
835*4882a593Smuzhiyun 		reg = serial_in(p, UART_OMAP_IER2);
836*4882a593Smuzhiyun 		reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
837*4882a593Smuzhiyun 		serial_out(p, UART_OMAP_IER2, reg);
838*4882a593Smuzhiyun 	}
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	dmaengine_tx_status(rxchan, cookie, &state);
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 	count = dma->rx_size - state.residue + state.in_flight_bytes;
843*4882a593Smuzhiyun 	if (count < dma->rx_size) {
844*4882a593Smuzhiyun 		dmaengine_terminate_async(rxchan);
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 		/*
847*4882a593Smuzhiyun 		 * Poll for teardown to complete which guarantees in
848*4882a593Smuzhiyun 		 * flight data is drained.
849*4882a593Smuzhiyun 		 */
850*4882a593Smuzhiyun 		if (state.in_flight_bytes) {
851*4882a593Smuzhiyun 			int poll_count = 25;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 			while (dmaengine_tx_status(rxchan, cookie, NULL) &&
854*4882a593Smuzhiyun 			       poll_count--)
855*4882a593Smuzhiyun 				cpu_relax();
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 			if (poll_count == -1)
858*4882a593Smuzhiyun 				dev_err(p->port.dev, "teardown incomplete\n");
859*4882a593Smuzhiyun 		}
860*4882a593Smuzhiyun 	}
861*4882a593Smuzhiyun 	if (!count)
862*4882a593Smuzhiyun 		goto out;
863*4882a593Smuzhiyun 	ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 	p->port.icount.rx += ret;
866*4882a593Smuzhiyun 	p->port.icount.buf_overrun += count - ret;
867*4882a593Smuzhiyun out:
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	tty_flip_buffer_push(tty_port);
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun static void __dma_rx_complete(void *param)
873*4882a593Smuzhiyun {
874*4882a593Smuzhiyun 	struct uart_8250_port *p = param;
875*4882a593Smuzhiyun 	struct omap8250_priv *priv = p->port.private_data;
876*4882a593Smuzhiyun 	struct uart_8250_dma *dma = p->dma;
877*4882a593Smuzhiyun 	struct dma_tx_state     state;
878*4882a593Smuzhiyun 	unsigned long flags;
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	spin_lock_irqsave(&p->port.lock, flags);
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	/*
883*4882a593Smuzhiyun 	 * If the tx status is not DMA_COMPLETE, then this is a delayed
884*4882a593Smuzhiyun 	 * completion callback. A previous RX timeout flush would have
885*4882a593Smuzhiyun 	 * already pushed the data, so exit.
886*4882a593Smuzhiyun 	 */
887*4882a593Smuzhiyun 	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
888*4882a593Smuzhiyun 			DMA_COMPLETE) {
889*4882a593Smuzhiyun 		spin_unlock_irqrestore(&p->port.lock, flags);
890*4882a593Smuzhiyun 		return;
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 	__dma_rx_do_complete(p);
893*4882a593Smuzhiyun 	if (!priv->throttled) {
894*4882a593Smuzhiyun 		p->ier |= UART_IER_RLSI | UART_IER_RDI;
895*4882a593Smuzhiyun 		serial_out(p, UART_IER, p->ier);
896*4882a593Smuzhiyun 		if (!(priv->habit & UART_HAS_EFR2))
897*4882a593Smuzhiyun 			omap_8250_rx_dma(p);
898*4882a593Smuzhiyun 	}
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	spin_unlock_irqrestore(&p->port.lock, flags);
901*4882a593Smuzhiyun }
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
904*4882a593Smuzhiyun {
905*4882a593Smuzhiyun 	struct omap8250_priv	*priv = p->port.private_data;
906*4882a593Smuzhiyun 	struct uart_8250_dma	*dma = p->dma;
907*4882a593Smuzhiyun 	struct dma_tx_state     state;
908*4882a593Smuzhiyun 	unsigned long		flags;
909*4882a593Smuzhiyun 	int ret;
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 	spin_lock_irqsave(&priv->rx_dma_lock, flags);
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun 	if (!dma->rx_running) {
914*4882a593Smuzhiyun 		spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
915*4882a593Smuzhiyun 		return;
916*4882a593Smuzhiyun 	}
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
919*4882a593Smuzhiyun 	if (ret == DMA_IN_PROGRESS) {
920*4882a593Smuzhiyun 		ret = dmaengine_pause(dma->rxchan);
921*4882a593Smuzhiyun 		if (WARN_ON_ONCE(ret))
922*4882a593Smuzhiyun 			priv->rx_dma_broken = true;
923*4882a593Smuzhiyun 	}
924*4882a593Smuzhiyun 	__dma_rx_do_complete(p);
925*4882a593Smuzhiyun 	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
926*4882a593Smuzhiyun }
927*4882a593Smuzhiyun 
928*4882a593Smuzhiyun static int omap_8250_rx_dma(struct uart_8250_port *p)
929*4882a593Smuzhiyun {
930*4882a593Smuzhiyun 	struct omap8250_priv		*priv = p->port.private_data;
931*4882a593Smuzhiyun 	struct uart_8250_dma            *dma = p->dma;
932*4882a593Smuzhiyun 	int				err = 0;
933*4882a593Smuzhiyun 	struct dma_async_tx_descriptor  *desc;
934*4882a593Smuzhiyun 	unsigned long			flags;
935*4882a593Smuzhiyun 	u32				reg;
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	if (priv->rx_dma_broken)
938*4882a593Smuzhiyun 		return -EINVAL;
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 	spin_lock_irqsave(&priv->rx_dma_lock, flags);
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (dma->rx_running) {
943*4882a593Smuzhiyun 		enum dma_status state;
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 		state = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, NULL);
946*4882a593Smuzhiyun 		if (state == DMA_COMPLETE) {
947*4882a593Smuzhiyun 			/*
948*4882a593Smuzhiyun 			 * Disable RX interrupts to allow RX DMA completion
949*4882a593Smuzhiyun 			 * callback to run.
950*4882a593Smuzhiyun 			 */
951*4882a593Smuzhiyun 			p->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
952*4882a593Smuzhiyun 			serial_out(p, UART_IER, p->ier);
953*4882a593Smuzhiyun 		}
954*4882a593Smuzhiyun 		goto out;
955*4882a593Smuzhiyun 	}
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
958*4882a593Smuzhiyun 					   dma->rx_size, DMA_DEV_TO_MEM,
959*4882a593Smuzhiyun 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
960*4882a593Smuzhiyun 	if (!desc) {
961*4882a593Smuzhiyun 		err = -EBUSY;
962*4882a593Smuzhiyun 		goto out;
963*4882a593Smuzhiyun 	}
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 	dma->rx_running = 1;
966*4882a593Smuzhiyun 	desc->callback = __dma_rx_complete;
967*4882a593Smuzhiyun 	desc->callback_param = p;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	dma->rx_cookie = dmaengine_submit(desc);
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	/*
972*4882a593Smuzhiyun 	 * Disable RX FIFO interrupt while RX DMA is enabled, else
973*4882a593Smuzhiyun 	 * spurious interrupt may be raised when data is in the RX FIFO
974*4882a593Smuzhiyun 	 * but is yet to be drained by DMA.
975*4882a593Smuzhiyun 	 */
976*4882a593Smuzhiyun 	if (priv->habit & UART_HAS_RHR_IT_DIS) {
977*4882a593Smuzhiyun 		reg = serial_in(p, UART_OMAP_IER2);
978*4882a593Smuzhiyun 		reg |= UART_OMAP_IER2_RHR_IT_DIS;
979*4882a593Smuzhiyun 		serial_out(p, UART_OMAP_IER2, reg);
980*4882a593Smuzhiyun 	}
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	dma_async_issue_pending(dma->rxchan);
983*4882a593Smuzhiyun out:
984*4882a593Smuzhiyun 	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
985*4882a593Smuzhiyun 	return err;
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun static int omap_8250_tx_dma(struct uart_8250_port *p);
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun static void omap_8250_dma_tx_complete(void *param)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	struct uart_8250_port	*p = param;
993*4882a593Smuzhiyun 	struct uart_8250_dma	*dma = p->dma;
994*4882a593Smuzhiyun 	struct circ_buf		*xmit = &p->port.state->xmit;
995*4882a593Smuzhiyun 	unsigned long		flags;
996*4882a593Smuzhiyun 	bool			en_thri = false;
997*4882a593Smuzhiyun 	struct omap8250_priv	*priv = p->port.private_data;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
1000*4882a593Smuzhiyun 				UART_XMIT_SIZE, DMA_TO_DEVICE);
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	spin_lock_irqsave(&p->port.lock, flags);
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	dma->tx_running = 0;
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	xmit->tail += dma->tx_size;
1007*4882a593Smuzhiyun 	xmit->tail &= UART_XMIT_SIZE - 1;
1008*4882a593Smuzhiyun 	p->port.icount.tx += dma->tx_size;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	if (priv->delayed_restore) {
1011*4882a593Smuzhiyun 		priv->delayed_restore = 0;
1012*4882a593Smuzhiyun 		omap8250_restore_regs(p);
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1016*4882a593Smuzhiyun 		uart_write_wakeup(&p->port);
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
1019*4882a593Smuzhiyun 		int ret;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 		ret = omap_8250_tx_dma(p);
1022*4882a593Smuzhiyun 		if (ret)
1023*4882a593Smuzhiyun 			en_thri = true;
1024*4882a593Smuzhiyun 	} else if (p->capabilities & UART_CAP_RPM) {
1025*4882a593Smuzhiyun 		en_thri = true;
1026*4882a593Smuzhiyun 	}
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	if (en_thri) {
1029*4882a593Smuzhiyun 		dma->tx_err = 1;
1030*4882a593Smuzhiyun 		serial8250_set_THRI(p);
1031*4882a593Smuzhiyun 	}
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 	spin_unlock_irqrestore(&p->port.lock, flags);
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun static int omap_8250_tx_dma(struct uart_8250_port *p)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun 	struct uart_8250_dma		*dma = p->dma;
1039*4882a593Smuzhiyun 	struct omap8250_priv		*priv = p->port.private_data;
1040*4882a593Smuzhiyun 	struct circ_buf			*xmit = &p->port.state->xmit;
1041*4882a593Smuzhiyun 	struct dma_async_tx_descriptor	*desc;
1042*4882a593Smuzhiyun 	unsigned int	skip_byte = 0;
1043*4882a593Smuzhiyun 	int ret;
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	if (dma->tx_running)
1046*4882a593Smuzhiyun 		return 0;
1047*4882a593Smuzhiyun 	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 		/*
1050*4882a593Smuzhiyun 		 * Even if no data, we need to return an error for the two cases
1051*4882a593Smuzhiyun 		 * below so serial8250_tx_chars() is invoked and properly clears
1052*4882a593Smuzhiyun 		 * THRI and/or allows runtime suspend.
1053*4882a593Smuzhiyun 		 */
1054*4882a593Smuzhiyun 		if (dma->tx_err || p->capabilities & UART_CAP_RPM) {
1055*4882a593Smuzhiyun 			ret = -EBUSY;
1056*4882a593Smuzhiyun 			goto err;
1057*4882a593Smuzhiyun 		}
1058*4882a593Smuzhiyun 		serial8250_clear_THRI(p);
1059*4882a593Smuzhiyun 		return 0;
1060*4882a593Smuzhiyun 	}
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1063*4882a593Smuzhiyun 	if (priv->habit & OMAP_DMA_TX_KICK) {
1064*4882a593Smuzhiyun 		u8 tx_lvl;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 		/*
1067*4882a593Smuzhiyun 		 * We need to put the first byte into the FIFO in order to start
1068*4882a593Smuzhiyun 		 * the DMA transfer. For transfers smaller than four bytes we
1069*4882a593Smuzhiyun 		 * don't bother doing DMA at all. It does not seem to matter if there
1070*4882a593Smuzhiyun 		 * are still bytes in the FIFO from the last transfer (in case
1071*4882a593Smuzhiyun 		 * we got here directly from omap_8250_dma_tx_complete()). Bytes
1072*4882a593Smuzhiyun 		 * leaving the FIFO seem not to trigger the DMA transfer. It is
1073*4882a593Smuzhiyun 		 * really the byte that we put into the FIFO.
1074*4882a593Smuzhiyun 		 * If the FIFO is already full then we most likely got here from
1075*4882a593Smuzhiyun 		 * omap_8250_dma_tx_complete(). And this means the DMA engine
1076*4882a593Smuzhiyun 		 * just completed its work. We don't have to wait the complete
1077*4882a593Smuzhiyun 		 * 86us at 115200,8n1 but around 60us (not to mention lower
1078*4882a593Smuzhiyun 		 * baudrates). So in that case we take the interrupt and try
1079*4882a593Smuzhiyun 		 * again with an empty FIFO.
1080*4882a593Smuzhiyun 		 */
1081*4882a593Smuzhiyun 		tx_lvl = serial_in(p, UART_OMAP_TX_LVL);
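		/*
		 * Editorial note on the timing above: one character at
		 * 115200, 8n1 is 10 bits / 115200 Bd ~= 86.8 us, which is
		 * where the "86us" figure comes from.
		 */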
1082*4882a593Smuzhiyun 		if (tx_lvl == p->tx_loadsz) {
1083*4882a593Smuzhiyun 			ret = -EBUSY;
1084*4882a593Smuzhiyun 			goto err;
1085*4882a593Smuzhiyun 		}
1086*4882a593Smuzhiyun 		if (dma->tx_size < 4) {
1087*4882a593Smuzhiyun 			ret = -EINVAL;
1088*4882a593Smuzhiyun 			goto err;
1089*4882a593Smuzhiyun 		}
1090*4882a593Smuzhiyun 		skip_byte = 1;
1091*4882a593Smuzhiyun 	}
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	desc = dmaengine_prep_slave_single(dma->txchan,
1094*4882a593Smuzhiyun 			dma->tx_addr + xmit->tail + skip_byte,
1095*4882a593Smuzhiyun 			dma->tx_size - skip_byte, DMA_MEM_TO_DEV,
1096*4882a593Smuzhiyun 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1097*4882a593Smuzhiyun 	if (!desc) {
1098*4882a593Smuzhiyun 		ret = -EBUSY;
1099*4882a593Smuzhiyun 		goto err;
1100*4882a593Smuzhiyun 	}
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	dma->tx_running = 1;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	desc->callback = omap_8250_dma_tx_complete;
1105*4882a593Smuzhiyun 	desc->callback_param = p;
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	dma->tx_cookie = dmaengine_submit(desc);
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
1110*4882a593Smuzhiyun 				   UART_XMIT_SIZE, DMA_TO_DEVICE);
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	dma_async_issue_pending(dma->txchan);
1113*4882a593Smuzhiyun 	if (dma->tx_err)
1114*4882a593Smuzhiyun 		dma->tx_err = 0;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	serial8250_clear_THRI(p);
1117*4882a593Smuzhiyun 	if (skip_byte)
1118*4882a593Smuzhiyun 		serial_out(p, UART_TX, xmit->buf[xmit->tail]);
1119*4882a593Smuzhiyun 	return 0;
1120*4882a593Smuzhiyun err:
1121*4882a593Smuzhiyun 	dma->tx_err = 1;
1122*4882a593Smuzhiyun 	return ret;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	switch (iir & 0x3f) {
1128*4882a593Smuzhiyun 	case UART_IIR_RLSI:
1129*4882a593Smuzhiyun 	case UART_IIR_RX_TIMEOUT:
1130*4882a593Smuzhiyun 	case UART_IIR_RDI:
1131*4882a593Smuzhiyun 		omap_8250_rx_dma_flush(up);
1132*4882a593Smuzhiyun 		return true;
1133*4882a593Smuzhiyun 	}
1134*4882a593Smuzhiyun 	return omap_8250_rx_dma(up);
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
1138*4882a593Smuzhiyun 					     u8 iir, unsigned char status)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
1141*4882a593Smuzhiyun 	    (iir & UART_IIR_RDI)) {
1142*4882a593Smuzhiyun 		if (handle_rx_dma(up, iir)) {
1143*4882a593Smuzhiyun 			status = serial8250_rx_chars(up, status);
1144*4882a593Smuzhiyun 			omap_8250_rx_dma(up);
1145*4882a593Smuzhiyun 		}
1146*4882a593Smuzhiyun 	}
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	return status;
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
1152*4882a593Smuzhiyun 				     unsigned char status)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun 	/*
1155*4882a593Smuzhiyun 	 * Queue a new transfer if the FIFO has data.
1156*4882a593Smuzhiyun 	 */
1157*4882a593Smuzhiyun 	if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
1158*4882a593Smuzhiyun 	    (up->ier & UART_IER_RDI)) {
1159*4882a593Smuzhiyun 		omap_8250_rx_dma(up);
1160*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
1161*4882a593Smuzhiyun 	} else if ((iir & 0x3f) == UART_IIR_RX_TIMEOUT) {
1162*4882a593Smuzhiyun 		/*
1163*4882a593Smuzhiyun 		 * Disable RX timeout, read IIR to clear
1164*4882a593Smuzhiyun 		 * current timeout condition, clear EFR2 to stop
1165*4882a593Smuzhiyun 		 * periodic timeouts, then re-enable interrupts.
1166*4882a593Smuzhiyun 		 */
1167*4882a593Smuzhiyun 		up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
1168*4882a593Smuzhiyun 		serial_out(up, UART_IER, up->ier);
1169*4882a593Smuzhiyun 		omap_8250_rx_dma_flush(up);
1170*4882a593Smuzhiyun 		serial_in(up, UART_IIR);
1171*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_EFR2, 0x0);
1172*4882a593Smuzhiyun 		up->ier |= UART_IER_RLSI | UART_IER_RDI;
1173*4882a593Smuzhiyun 		serial_out(up, UART_IER, up->ier);
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun /*
1178*4882a593Smuzhiyun  * This is mostly serial8250_handle_irq(). We have a slightly different DMA
1179*4882a593Smuzhiyun  * hook for RX/TX and need different logic for them in the ISR. Therefore we
1180*4882a593Smuzhiyun  * use the default routine in the non-DMA case and this one with DMA.
1181*4882a593Smuzhiyun  */
1182*4882a593Smuzhiyun static int omap_8250_dma_handle_irq(struct uart_port *port)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun 	struct uart_8250_port *up = up_to_u8250p(port);
1185*4882a593Smuzhiyun 	struct omap8250_priv *priv = up->port.private_data;
1186*4882a593Smuzhiyun 	unsigned char status;
1187*4882a593Smuzhiyun 	unsigned long flags;
1188*4882a593Smuzhiyun 	u8 iir;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	serial8250_rpm_get(up);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	iir = serial_port_in(port, UART_IIR);
1193*4882a593Smuzhiyun 	if (iir & UART_IIR_NO_INT) {
1194*4882a593Smuzhiyun 		serial8250_rpm_put(up);
1195*4882a593Smuzhiyun 		return IRQ_HANDLED;
1196*4882a593Smuzhiyun 	}
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	spin_lock_irqsave(&port->lock, flags);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	status = serial_port_in(port, UART_LSR);
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	if (priv->habit & UART_HAS_EFR2)
1203*4882a593Smuzhiyun 		am654_8250_handle_rx_dma(up, iir, status);
1204*4882a593Smuzhiyun 	else
1205*4882a593Smuzhiyun 		status = omap_8250_handle_rx_dma(up, iir, status);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	serial8250_modem_status(up);
1208*4882a593Smuzhiyun 	if (status & UART_LSR_THRE && up->dma->tx_err) {
1209*4882a593Smuzhiyun 		if (uart_tx_stopped(&up->port) ||
1210*4882a593Smuzhiyun 		    uart_circ_empty(&up->port.state->xmit)) {
1211*4882a593Smuzhiyun 			up->dma->tx_err = 0;
1212*4882a593Smuzhiyun 			serial8250_tx_chars(up);
1213*4882a593Smuzhiyun 		} else {
1214*4882a593Smuzhiyun 			/*
1215*4882a593Smuzhiyun 			 * Try again due to an earlier failure which
1216*4882a593Smuzhiyun 			 * might have been resolved by now.
1217*4882a593Smuzhiyun 			 */
1218*4882a593Smuzhiyun 			if (omap_8250_tx_dma(up))
1219*4882a593Smuzhiyun 				serial8250_tx_chars(up);
1220*4882a593Smuzhiyun 		}
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	uart_unlock_and_check_sysrq(port, flags);
1224*4882a593Smuzhiyun 	serial8250_rpm_put(up);
1225*4882a593Smuzhiyun 	return 1;
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
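/*
 * DMA filter that rejects every channel. It is installed in omap8250_probe()
 * so that the 8250 DMA core never ends up with a generic, unusable channel
 * (see the comment there).
 */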
1228*4882a593Smuzhiyun static bool the_no_dma_filter_fn(struct dma_chan *chan, void *param)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun 	return false;
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun #else
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun static inline int omap_8250_rx_dma(struct uart_8250_port *p)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun 	return -EINVAL;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun #endif
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun static int omap8250_no_handle_irq(struct uart_port *port)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	/* IRQ has not been requested but handling irq? */
1244*4882a593Smuzhiyun 	WARN_ONCE(1, "Unexpected irq handling before port startup\n");
1245*4882a593Smuzhiyun 	return 0;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun static struct omap8250_dma_params am654_dma = {
1249*4882a593Smuzhiyun 	.rx_size = SZ_2K,
1250*4882a593Smuzhiyun 	.rx_trigger = 1,
1251*4882a593Smuzhiyun 	.tx_trigger = TX_TRIGGER,
1252*4882a593Smuzhiyun };
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun static struct omap8250_dma_params am33xx_dma = {
1255*4882a593Smuzhiyun 	.rx_size = RX_TRIGGER,
1256*4882a593Smuzhiyun 	.rx_trigger = RX_TRIGGER,
1257*4882a593Smuzhiyun 	.tx_trigger = TX_TRIGGER,
1258*4882a593Smuzhiyun };
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun static struct omap8250_platdata am654_platdata = {
1261*4882a593Smuzhiyun 	.dma_params	= &am654_dma,
1262*4882a593Smuzhiyun 	.habit		= UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
1263*4882a593Smuzhiyun 			  UART_RX_TIMEOUT_QUIRK,
1264*4882a593Smuzhiyun };
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun static struct omap8250_platdata am33xx_platdata = {
1267*4882a593Smuzhiyun 	.dma_params	= &am33xx_dma,
1268*4882a593Smuzhiyun 	.habit		= OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE,
1269*4882a593Smuzhiyun };
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun static struct omap8250_platdata omap4_platdata = {
1272*4882a593Smuzhiyun 	.dma_params	= &am33xx_dma,
1273*4882a593Smuzhiyun 	.habit		= UART_ERRATA_CLOCK_DISABLE,
1274*4882a593Smuzhiyun };
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun static const struct of_device_id omap8250_dt_ids[] = {
1277*4882a593Smuzhiyun 	{ .compatible = "ti,am654-uart", .data = &am654_platdata, },
1278*4882a593Smuzhiyun 	{ .compatible = "ti,omap2-uart" },
1279*4882a593Smuzhiyun 	{ .compatible = "ti,omap3-uart" },
1280*4882a593Smuzhiyun 	{ .compatible = "ti,omap4-uart", .data = &omap4_platdata, },
1281*4882a593Smuzhiyun 	{ .compatible = "ti,am3352-uart", .data = &am33xx_platdata, },
1282*4882a593Smuzhiyun 	{ .compatible = "ti,am4372-uart", .data = &am33xx_platdata, },
1283*4882a593Smuzhiyun 	{ .compatible = "ti,dra742-uart", .data = &omap4_platdata, },
1284*4882a593Smuzhiyun 	{},
1285*4882a593Smuzhiyun };
1286*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun static int omap8250_probe(struct platform_device *pdev)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun 	struct device_node *np = pdev->dev.of_node;
1291*4882a593Smuzhiyun 	struct omap8250_priv *priv;
1292*4882a593Smuzhiyun 	const struct omap8250_platdata *pdata;
1293*4882a593Smuzhiyun 	struct uart_8250_port up;
1294*4882a593Smuzhiyun 	struct resource *regs;
1295*4882a593Smuzhiyun 	void __iomem *membase;
1296*4882a593Smuzhiyun 	int irq, ret;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	irq = platform_get_irq(pdev, 0);
1299*4882a593Smuzhiyun 	if (irq < 0)
1300*4882a593Smuzhiyun 		return irq;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1303*4882a593Smuzhiyun 	if (!regs) {
1304*4882a593Smuzhiyun 		dev_err(&pdev->dev, "missing registers\n");
1305*4882a593Smuzhiyun 		return -EINVAL;
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1309*4882a593Smuzhiyun 	if (!priv)
1310*4882a593Smuzhiyun 		return -ENOMEM;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	membase = devm_ioremap(&pdev->dev, regs->start,
1313*4882a593Smuzhiyun 				       resource_size(regs));
1314*4882a593Smuzhiyun 	if (!membase)
1315*4882a593Smuzhiyun 		return -ENODEV;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	memset(&up, 0, sizeof(up));
1318*4882a593Smuzhiyun 	up.port.dev = &pdev->dev;
1319*4882a593Smuzhiyun 	up.port.mapbase = regs->start;
1320*4882a593Smuzhiyun 	up.port.membase = membase;
1321*4882a593Smuzhiyun 	up.port.irq = irq;
1322*4882a593Smuzhiyun 	/*
1323*4882a593Smuzhiyun 	 * It claims to be 16C750 compatible, however it is a little different.
1324*4882a593Smuzhiyun 	 * It has EFR and has no FCR7_64byte bit. The AFE (which it claims to
1325*4882a593Smuzhiyun 	 * have) is enabled via EFR instead of MCR. The type is set to 8250 here
1326*4882a593Smuzhiyun 	 * just to get things going. UNKNOWN does not work for a few reasons and
1327*4882a593Smuzhiyun 	 * we don't need our own type since we don't use 8250's set_termios()
1328*4882a593Smuzhiyun 	 * or pm callback.
1329*4882a593Smuzhiyun 	 */
1330*4882a593Smuzhiyun 	up.port.type = PORT_8250;
1331*4882a593Smuzhiyun 	up.port.iotype = UPIO_MEM;
1332*4882a593Smuzhiyun 	up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
1333*4882a593Smuzhiyun 		UPF_HARD_FLOW;
1334*4882a593Smuzhiyun 	up.port.private_data = priv;
1335*4882a593Smuzhiyun 
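	/* Registers are spaced on 32-bit boundaries, hence regshift = 2. */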
1336*4882a593Smuzhiyun 	up.port.regshift = 2;
1337*4882a593Smuzhiyun 	up.port.fifosize = 64;
1338*4882a593Smuzhiyun 	up.tx_loadsz = 64;
1339*4882a593Smuzhiyun 	up.capabilities = UART_CAP_FIFO;
1340*4882a593Smuzhiyun #ifdef CONFIG_PM
1341*4882a593Smuzhiyun 	/*
1342*4882a593Smuzhiyun 	 * Runtime PM is mostly transparent. However, to do it right we need a
1343*4882a593Smuzhiyun 	 * TX empty interrupt before we can put the device into auto idle. So if
1344*4882a593Smuzhiyun 	 * PM is not enabled we don't add that flag and can spare that one extra
1345*4882a593Smuzhiyun 	 * interrupt in the TX path.
1346*4882a593Smuzhiyun 	 */
1347*4882a593Smuzhiyun 	up.capabilities |= UART_CAP_RPM;
1348*4882a593Smuzhiyun #endif
1349*4882a593Smuzhiyun 	up.port.set_termios = omap_8250_set_termios;
1350*4882a593Smuzhiyun 	up.port.set_mctrl = omap8250_set_mctrl;
1351*4882a593Smuzhiyun 	up.port.pm = omap_8250_pm;
1352*4882a593Smuzhiyun 	up.port.startup = omap_8250_startup;
1353*4882a593Smuzhiyun 	up.port.shutdown = omap_8250_shutdown;
1354*4882a593Smuzhiyun 	up.port.throttle = omap_8250_throttle;
1355*4882a593Smuzhiyun 	up.port.unthrottle = omap_8250_unthrottle;
1356*4882a593Smuzhiyun 	up.port.rs485_config = serial8250_em485_config;
1357*4882a593Smuzhiyun 	up.rs485_start_tx = serial8250_em485_start_tx;
1358*4882a593Smuzhiyun 	up.rs485_stop_tx = serial8250_em485_stop_tx;
1359*4882a593Smuzhiyun 	up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	ret = of_alias_get_id(np, "serial");
1362*4882a593Smuzhiyun 	if (ret < 0) {
1363*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to get alias\n");
1364*4882a593Smuzhiyun 		return ret;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun 	up.port.line = ret;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	if (of_property_read_u32(np, "clock-frequency", &up.port.uartclk)) {
1369*4882a593Smuzhiyun 		struct clk *clk;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 		clk = devm_clk_get(&pdev->dev, NULL);
1372*4882a593Smuzhiyun 		if (IS_ERR(clk)) {
1373*4882a593Smuzhiyun 			if (PTR_ERR(clk) == -EPROBE_DEFER)
1374*4882a593Smuzhiyun 				return -EPROBE_DEFER;
1375*4882a593Smuzhiyun 		} else {
1376*4882a593Smuzhiyun 			up.port.uartclk = clk_get_rate(clk);
1377*4882a593Smuzhiyun 		}
1378*4882a593Smuzhiyun 	}
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	if (of_property_read_u32(np, "overrun-throttle-ms",
1381*4882a593Smuzhiyun 				 &up.overrun_backoff_time_ms) != 0)
1382*4882a593Smuzhiyun 		up.overrun_backoff_time_ms = 0;
1383*4882a593Smuzhiyun 
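	/* Optional second DT interrupt: a dedicated wakeup irq (0 if absent). */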
1384*4882a593Smuzhiyun 	priv->wakeirq = irq_of_parse_and_map(np, 1);
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	pdata = of_device_get_match_data(&pdev->dev);
1387*4882a593Smuzhiyun 	if (pdata)
1388*4882a593Smuzhiyun 		priv->habit |= pdata->habit;
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	if (!up.port.uartclk) {
1391*4882a593Smuzhiyun 		up.port.uartclk = DEFAULT_CLK_SPEED;
1392*4882a593Smuzhiyun 		dev_warn(&pdev->dev,
1393*4882a593Smuzhiyun 			 "No clock speed specified: using default: %d\n",
1394*4882a593Smuzhiyun 			 DEFAULT_CLK_SPEED);
1395*4882a593Smuzhiyun 	}
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
1398*4882a593Smuzhiyun 	priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
1399*4882a593Smuzhiyun 	cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
1400*4882a593Smuzhiyun 	INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	spin_lock_init(&priv->rx_dma_lock);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	device_init_wakeup(&pdev->dev, true);
1405*4882a593Smuzhiyun 	pm_runtime_enable(&pdev->dev);
1406*4882a593Smuzhiyun 	pm_runtime_use_autosuspend(&pdev->dev);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/*
1409*4882a593Smuzhiyun 	 * Disable runtime PM until autosuspend delay unless specifically
1410*4882a593Smuzhiyun 	 * enabled by the user via sysfs. This is the historic way to
1411*4882a593Smuzhiyun 	 * prevent an unsafe default policy with lossy characters on wake-up.
1412*4882a593Smuzhiyun 	 * For serdev devices this is not needed, the policy can be managed by
1413*4882a593Smuzhiyun 	 * the serdev driver.
1414*4882a593Smuzhiyun 	 */
1415*4882a593Smuzhiyun 	if (!of_get_available_child_count(pdev->dev.of_node))
1416*4882a593Smuzhiyun 		pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	pm_runtime_irq_safe(&pdev->dev);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	pm_runtime_get_sync(&pdev->dev);
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	omap_serial_fill_features_erratas(&up, priv);
1423*4882a593Smuzhiyun 	up.port.handle_irq = omap8250_no_handle_irq;
1424*4882a593Smuzhiyun 	priv->rx_trigger = RX_TRIGGER;
1425*4882a593Smuzhiyun 	priv->tx_trigger = TX_TRIGGER;
1426*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_DMA
1427*4882a593Smuzhiyun 	/*
1428*4882a593Smuzhiyun 	 * Oh DMA support. If there are no DMA properties in the DT then
1429*4882a593Smuzhiyun 	 * we will fall back to a generic DMA channel which does not
1430*4882a593Smuzhiyun 	 * really work here. To ensure that we do not get a generic DMA
1431*4882a593Smuzhiyun 	 * channel assigned, we use the_no_dma_filter_fn() here.
1432*4882a593Smuzhiyun 	 * To avoid "failed to request DMA" messages we check for DMA
1433*4882a593Smuzhiyun 	 * properties in DT.
1434*4882a593Smuzhiyun 	 */
1435*4882a593Smuzhiyun 	ret = of_property_count_strings(np, "dma-names");
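	/* Only set up DMA when both DT dma-names entries ("rx"/"tx") are present. */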
1436*4882a593Smuzhiyun 	if (ret == 2) {
1437*4882a593Smuzhiyun 		struct omap8250_dma_params *dma_params = NULL;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 		up.dma = &priv->omap8250_dma;
1440*4882a593Smuzhiyun 		up.dma->fn = the_no_dma_filter_fn;
1441*4882a593Smuzhiyun 		up.dma->tx_dma = omap_8250_tx_dma;
1442*4882a593Smuzhiyun 		up.dma->rx_dma = omap_8250_rx_dma;
1443*4882a593Smuzhiyun 		if (pdata)
1444*4882a593Smuzhiyun 			dma_params = pdata->dma_params;
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 		if (dma_params) {
1447*4882a593Smuzhiyun 			up.dma->rx_size = dma_params->rx_size;
1448*4882a593Smuzhiyun 			up.dma->rxconf.src_maxburst = dma_params->rx_trigger;
1449*4882a593Smuzhiyun 			up.dma->txconf.dst_maxburst = dma_params->tx_trigger;
1450*4882a593Smuzhiyun 			priv->rx_trigger = dma_params->rx_trigger;
1451*4882a593Smuzhiyun 			priv->tx_trigger = dma_params->tx_trigger;
1452*4882a593Smuzhiyun 		} else {
1453*4882a593Smuzhiyun 			up.dma->rx_size = RX_TRIGGER;
1454*4882a593Smuzhiyun 			up.dma->rxconf.src_maxburst = RX_TRIGGER;
1455*4882a593Smuzhiyun 			up.dma->txconf.dst_maxburst = TX_TRIGGER;
1456*4882a593Smuzhiyun 		}
1457*4882a593Smuzhiyun 	}
1458*4882a593Smuzhiyun #endif
1459*4882a593Smuzhiyun 	ret = serial8250_register_8250_port(&up);
1460*4882a593Smuzhiyun 	if (ret < 0) {
1461*4882a593Smuzhiyun 		dev_err(&pdev->dev, "unable to register 8250 port\n");
1462*4882a593Smuzhiyun 		goto err;
1463*4882a593Smuzhiyun 	}
1464*4882a593Smuzhiyun 	priv->line = ret;
1465*4882a593Smuzhiyun 	platform_set_drvdata(pdev, priv);
1466*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(&pdev->dev);
1467*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(&pdev->dev);
1468*4882a593Smuzhiyun 	return 0;
1469*4882a593Smuzhiyun err:
1470*4882a593Smuzhiyun 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1471*4882a593Smuzhiyun 	pm_runtime_put_sync(&pdev->dev);
1472*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1473*4882a593Smuzhiyun 	return ret;
1474*4882a593Smuzhiyun }
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun static int omap8250_remove(struct platform_device *pdev)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun 	struct omap8250_priv *priv = platform_get_drvdata(pdev);
1479*4882a593Smuzhiyun 	int err;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	err = pm_runtime_resume_and_get(&pdev->dev);
1482*4882a593Smuzhiyun 	if (err)
1483*4882a593Smuzhiyun 		return err;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1486*4882a593Smuzhiyun 	pm_runtime_put_sync(&pdev->dev);
1487*4882a593Smuzhiyun 	flush_work(&priv->qos_work);
1488*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1489*4882a593Smuzhiyun 	serial8250_unregister_port(priv->line);
1490*4882a593Smuzhiyun 	cpu_latency_qos_remove_request(&priv->pm_qos_request);
1491*4882a593Smuzhiyun 	device_init_wakeup(&pdev->dev, false);
1492*4882a593Smuzhiyun 	return 0;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1496*4882a593Smuzhiyun static int omap8250_prepare(struct device *dev)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	if (!priv)
1501*4882a593Smuzhiyun 		return 0;
1502*4882a593Smuzhiyun 	priv->is_suspending = true;
1503*4882a593Smuzhiyun 	return 0;
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun static void omap8250_complete(struct device *dev)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	if (!priv)
1511*4882a593Smuzhiyun 		return;
1512*4882a593Smuzhiyun 	priv->is_suspending = false;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun static int omap8250_suspend(struct device *dev)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1518*4882a593Smuzhiyun 	struct uart_8250_port *up = serial8250_get_port(priv->line);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	serial8250_suspend_port(priv->line);
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	pm_runtime_get_sync(dev);
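	/* Clear the wakeup enable bits if this device may not wake the system. */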
1523*4882a593Smuzhiyun 	if (!device_may_wakeup(dev))
1524*4882a593Smuzhiyun 		priv->wer = 0;
1525*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_WER, priv->wer);
1526*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(dev);
1527*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(dev);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	flush_work(&priv->qos_work);
1530*4882a593Smuzhiyun 	return 0;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun static int omap8250_resume(struct device *dev)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	serial8250_resume_port(priv->line);
1538*4882a593Smuzhiyun 	return 0;
1539*4882a593Smuzhiyun }
1540*4882a593Smuzhiyun #else
1541*4882a593Smuzhiyun #define omap8250_prepare NULL
1542*4882a593Smuzhiyun #define omap8250_complete NULL
1543*4882a593Smuzhiyun #endif
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun #ifdef CONFIG_PM
1546*4882a593Smuzhiyun static int omap8250_lost_context(struct uart_8250_port *up)
1547*4882a593Smuzhiyun {
1548*4882a593Smuzhiyun 	u32 val;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	val = serial_in(up, UART_OMAP_SCR);
1551*4882a593Smuzhiyun 	/*
1552*4882a593Smuzhiyun 	 * If we lose context, then SCR is set to its reset value of zero.
1553*4882a593Smuzhiyun 	 * After set_termios() we set bit 3 of SCR (TX_EMPTY_CTL_IT) to 1,
1554*4882a593Smuzhiyun 	 * among other bits, so the register is never set back to zero again.
1555*4882a593Smuzhiyun 	 */
1556*4882a593Smuzhiyun 	if (!val)
1557*4882a593Smuzhiyun 		return 1;
1558*4882a593Smuzhiyun 	return 0;
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun /* TODO: in future, this should happen via API in drivers/reset/ */
1562*4882a593Smuzhiyun static int omap8250_soft_reset(struct device *dev)
1563*4882a593Smuzhiyun {
1564*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1565*4882a593Smuzhiyun 	struct uart_8250_port *up = serial8250_get_port(priv->line);
1566*4882a593Smuzhiyun 	int timeout = 100;
1567*4882a593Smuzhiyun 	int sysc;
1568*4882a593Smuzhiyun 	int syss;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	/*
1571*4882a593Smuzhiyun 	 * At least on omap4, unused uarts may not idle after reset without
1572*4882a593Smuzhiyun 	 * a basic scr dma configuration even with no dma in use. The
1573*4882a593Smuzhiyun 	 * module clkctrl status bits will be 1 instead of 3 blocking idle
1574*4882a593Smuzhiyun 	 * for the whole clockdomain. The softreset below will clear scr,
1575*4882a593Smuzhiyun 	 * and we restore it on resume so this is safe to do on all SoCs
1576*4882a593Smuzhiyun 	 * needing omap8250_soft_reset() quirk. Do it in two writes as
1577*4882a593Smuzhiyun 	 * recommended in the comment for omap8250_update_scr().
1578*4882a593Smuzhiyun 	 */
1579*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
1580*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_SCR,
1581*4882a593Smuzhiyun 		   OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	sysc = serial_in(up, UART_OMAP_SYSC);
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	/* softreset the UART */
1586*4882a593Smuzhiyun 	sysc |= OMAP_UART_SYSC_SOFTRESET;
1587*4882a593Smuzhiyun 	serial_out(up, UART_OMAP_SYSC, sysc);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	/* By experiment, 1us is enough for the reset to complete on AM335x */
1590*4882a593Smuzhiyun 	do {
1591*4882a593Smuzhiyun 		udelay(1);
1592*4882a593Smuzhiyun 		syss = serial_in(up, UART_OMAP_SYSS);
1593*4882a593Smuzhiyun 	} while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE));
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	if (!timeout) {
1596*4882a593Smuzhiyun 		dev_err(dev, "timed out waiting for reset done\n");
1597*4882a593Smuzhiyun 		return -ETIMEDOUT;
1598*4882a593Smuzhiyun 	}
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	return 0;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun static int omap8250_runtime_suspend(struct device *dev)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1606*4882a593Smuzhiyun 	struct uart_8250_port *up;
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	/* In case runtime-pm tries this before we are setup */
1609*4882a593Smuzhiyun 	if (!priv)
1610*4882a593Smuzhiyun 		return 0;
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	up = serial8250_get_port(priv->line);
1613*4882a593Smuzhiyun 	/*
1614*4882a593Smuzhiyun 	 * When using 'no_console_suspend', the console UART must not be
1615*4882a593Smuzhiyun 	 * suspended. Since driver suspend is managed by runtime suspend,
1616*4882a593Smuzhiyun 	 * preventing runtime suspend (by returning an error) will keep the
1617*4882a593Smuzhiyun 	 * device active during suspend.
1618*4882a593Smuzhiyun 	 */
1619*4882a593Smuzhiyun 	if (priv->is_suspending && !console_suspend_enabled) {
1620*4882a593Smuzhiyun 		if (uart_console(&up->port))
1621*4882a593Smuzhiyun 			return -EBUSY;
1622*4882a593Smuzhiyun 	}
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
1625*4882a593Smuzhiyun 		int ret;
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 		ret = omap8250_soft_reset(dev);
1628*4882a593Smuzhiyun 		if (ret)
1629*4882a593Smuzhiyun 			return ret;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 		/* Restore to UART mode after reset (for wakeup) */
1632*4882a593Smuzhiyun 		omap8250_update_mdr1(up, priv);
1633*4882a593Smuzhiyun 		/* Restore wakeup enable register */
1634*4882a593Smuzhiyun 		serial_out(up, UART_OMAP_WER, priv->wer);
1635*4882a593Smuzhiyun 	}
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	if (up->dma && up->dma->rxchan)
1638*4882a593Smuzhiyun 		omap_8250_rx_dma_flush(up);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
1641*4882a593Smuzhiyun 	schedule_work(&priv->qos_work);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	return 0;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun static int omap8250_runtime_resume(struct device *dev)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	struct omap8250_priv *priv = dev_get_drvdata(dev);
1649*4882a593Smuzhiyun 	struct uart_8250_port *up;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* In case runtime-pm tries this before we are setup */
1652*4882a593Smuzhiyun 	if (!priv)
1653*4882a593Smuzhiyun 		return 0;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	up = serial8250_get_port(priv->line);
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	if (omap8250_lost_context(up))
1658*4882a593Smuzhiyun 		omap8250_restore_regs(up);
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	if (up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2))
1661*4882a593Smuzhiyun 		omap_8250_rx_dma(up);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	priv->latency = priv->calc_latency;
1664*4882a593Smuzhiyun 	schedule_work(&priv->qos_work);
1665*4882a593Smuzhiyun 	return 0;
1666*4882a593Smuzhiyun }
1667*4882a593Smuzhiyun #endif
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun #ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP
1670*4882a593Smuzhiyun static int __init omap8250_console_fixup(void)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	char *omap_str;
1673*4882a593Smuzhiyun 	char *options;
1674*4882a593Smuzhiyun 	u8 idx;
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	if (strstr(boot_command_line, "console=ttyS"))
1677*4882a593Smuzhiyun 		/* user set a ttyS based name for the console */
1678*4882a593Smuzhiyun 		return 0;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	omap_str = strstr(boot_command_line, "console=ttyO");
1681*4882a593Smuzhiyun 	if (!omap_str)
1682*4882a593Smuzhiyun 		/* user did not set ttyO based console, so we don't care */
1683*4882a593Smuzhiyun 		return 0;
1684*4882a593Smuzhiyun 
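	/* Skip past the "console=ttyO" prefix (12 characters) to the port index. */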
1685*4882a593Smuzhiyun 	omap_str += 12;
1686*4882a593Smuzhiyun 	if ('0' <= *omap_str && *omap_str <= '9')
1687*4882a593Smuzhiyun 		idx = *omap_str - '0';
1688*4882a593Smuzhiyun 	else
1689*4882a593Smuzhiyun 		return 0;
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	omap_str++;
1692*4882a593Smuzhiyun 	if (omap_str[0] == ',') {
1693*4882a593Smuzhiyun 		omap_str++;
1694*4882a593Smuzhiyun 		options = omap_str;
1695*4882a593Smuzhiyun 	} else {
1696*4882a593Smuzhiyun 		options = NULL;
1697*4882a593Smuzhiyun 	}
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	add_preferred_console("ttyS", idx, options);
1700*4882a593Smuzhiyun 	pr_err("WARNING: Your 'console=ttyO%d' has been replaced by 'ttyS%d'\n",
1701*4882a593Smuzhiyun 	       idx, idx);
1702*4882a593Smuzhiyun 	pr_err("This ensures that you still see kernel messages. Please\n");
1703*4882a593Smuzhiyun 	pr_err("update your kernel commandline.\n");
1704*4882a593Smuzhiyun 	return 0;
1705*4882a593Smuzhiyun }
1706*4882a593Smuzhiyun console_initcall(omap8250_console_fixup);
1707*4882a593Smuzhiyun #endif
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun static const struct dev_pm_ops omap8250_dev_pm_ops = {
1710*4882a593Smuzhiyun 	SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
1711*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(omap8250_runtime_suspend,
1712*4882a593Smuzhiyun 			   omap8250_runtime_resume, NULL)
1713*4882a593Smuzhiyun 	.prepare        = omap8250_prepare,
1714*4882a593Smuzhiyun 	.complete       = omap8250_complete,
1715*4882a593Smuzhiyun };
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun static struct platform_driver omap8250_platform_driver = {
1718*4882a593Smuzhiyun 	.driver = {
1719*4882a593Smuzhiyun 		.name		= "omap8250",
1720*4882a593Smuzhiyun 		.pm		= &omap8250_dev_pm_ops,
1721*4882a593Smuzhiyun 		.of_match_table = omap8250_dt_ids,
1722*4882a593Smuzhiyun 	},
1723*4882a593Smuzhiyun 	.probe			= omap8250_probe,
1724*4882a593Smuzhiyun 	.remove			= omap8250_remove,
1725*4882a593Smuzhiyun };
1726*4882a593Smuzhiyun module_platform_driver(omap8250_platform_driver);
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun MODULE_AUTHOR("Sebastian Andrzej Siewior");
1729*4882a593Smuzhiyun MODULE_DESCRIPTION("OMAP 8250 Driver");
1730*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1731