// SPDX-License-Identifier: GPL-2.0+
/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

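/*
 * Upper bound on the number of passes through the interrupt-handler
 * loop, so a stuck or screaming interrupt source cannot keep the CPU
 * in the handler forever.
 */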
#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
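/*
 * A flag bit OR'd into every character read from REG_DR, above the
 * real data and error bits.  Elsewhere in the driver, set_termios()
 * is expected to put this bit into ignore_status_mask when CREAD is
 * off, so that every received character is then discarded by
 * uart_insert_char().
 */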
#define UART_DUMMY_DR_RX	(1 << 16)

static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif

static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};

static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}

static struct vendor_data vendor_zte = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_zte,
};

/* Deals with DMA transactions */

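/*
 * RX DMA uses two of these buffers (sgbuf_a/sgbuf_b below) in a
 * ping-pong scheme: while the DMA engine fills one, the driver drains
 * the other into the TTY layer, switching on each completion or FIFO
 * timeout.
 */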
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

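/*
 * Register accessors.  Vendors that can only tolerate 32-bit APB
 * accesses (vendor_data.access_32b) are expected to have the port set
 * up with iotype == UPIO_MEM32; everyone else gets 16-bit accesses.
 */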
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, fifotaken;
	int sysrq;
	u16 status;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		spin_unlock(&uap->port.lock);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		spin_lock(&uap->port.lock);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
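/*
 * Each DMA buffer is a single page: one for TX, and one per RX
 * scatter-buffer in the ping-pong pair described above.
 */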

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Poll rate defaults to 100 ms if not
				 * specified.  This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
					dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
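
/*
 * A hypothetical device-tree fragment matching the "auto-poll",
 * "poll-rate-ms" and "poll-timeout-ms" properties parsed above (node
 * name and unit address are illustrative only):
 *
 *	uart0: serial@101f1000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		auto-poll;
 *		poll-rate-ms = <50>;
 *		poll-timeout-ms = <5000>;
 *	};
 */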

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	dmaengine_terminate_async(uap->dmatx.chan);

	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On each poll it checks the residue in the DMA buffer and transfers
 * data to the TTY.  last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(struct timer_list *t)
{
	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
			mod_timer(&uap->dmarx.timer,
				  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
1184*4882a593Smuzhiyun
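/*
 * Tear down DMA: wait for the transmitter to drain, disable DMA in the
 * DMACR register, then terminate any in-flight transfers and release
 * the TX and RX buffers allocated in pl011_dma_startup().
 */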
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

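/* Prefer DMA for TX; fall back to programmed I/O if it cannot be used */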
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_throttle_rx(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	pl011_stop_rx(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}

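/*
 * Drain the RX FIFO into the TTY layer. The port lock is dropped while
 * pushing to the tty buffer, and RX DMA is re-armed afterwards if we had
 * temporarily fallen back to interrupt mode.
 */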
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

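/* Write one character to the TX FIFO; returns false if the FIFO is full */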
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}

/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}

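/*
 * Sample the modem status lines from the flag register and report any
 * change in DCD, DSR or CTS since the last call.
 */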
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	if (!uap->vendor->cts_event_workaround)
		return;

	/* Workaround to make sure that all bits are unlocked */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce a 26 ns (1 UART clock) delay before W1C;
	 * a single APB access incurs a 2 pclk (133.12 MHz) delay,
	 * so add two dummy reads.
	 */
	pl011_read(uap, REG_ICR);
	pl011_read(uap, REG_ICR);
}

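/*
 * Main interrupt handler: acknowledge and service RX, modem-status and
 * TX interrupts in a loop, bounded by AMBA_ISR_PASS_LIMIT so a stuck
 * interrupt source cannot monopolise the CPU.
 */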
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = pl011_read(uap, REG_RIS) & uap->im;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & uap->im;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (port->status & UPSTAT_AUTORTS) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even without the race, e.g. the
	 * controller immediately received some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

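/*
 * Bring the hardware to a known state: enable pinctrl and the clock,
 * clear pending error/RX interrupts and run any platform init hook.
 */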
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

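/* Some vendors map RX and TX LCRH to separate registers; detect that here */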
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write to a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}

static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
}

/*
 * Enable interrupts: only the receive timeout when using DMA; if the
 * initial RX DMA job failed, start in interrupt mode as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&uap->port.lock, flags);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);

	/*
	 * RXIS is asserted only when the RX FIFO transitions from below
	 * to above the trigger threshold. If the RX FIFO is already
	 * full to the threshold this can't happen and RXIS will now be
	 * stuck off. Drain the RX FIFO explicitly to fix this:
	 */
	for (i = 0; i < uap->fifosize * 2; ++i) {
		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
			break;

		pl011_read(uap, REG_DR);
	}

	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

static void pl011_unthrottle_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);

	pl011_enable_interrupts(uap);
}

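/*
 * Open the port: initialise the hardware, claim the IRQ, restore RTS/DTR
 * from the saved control register, start DMA and finally unmask interrupts.
 */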
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}

/*
 * Disable the port. This must not disable RTS and DTR; their state
 * must be preserved so that startup() can restore it.
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * Disable the break condition and the FIFOs.
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}

static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}

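/*
 * Close the port: mask interrupts, stop DMA, free the IRQ, disable the
 * UART (saving RTS/DTR for the next startup), then release the clock
 * and put the pins to sleep.
 */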
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

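/*
 * Build the read/ignore status masks from the termios flags so that
 * error bits in received characters are reported or discarded as the
 * line discipline requested.
 */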
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}

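/*
 * Apply a new line configuration: compute the baud divisor, build the
 * LCRH word from the character format, then program the registers in
 * the order the hardware requires (IBRD/FBRD before LCRH).
 */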
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

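	/*
	 * The divisor is held in 64ths: with 16x sampling the divisor is
	 * uartclk / (16 * baud), so quot = 64 * divisor = 4 * uartclk / baud;
	 * with 8x oversampling it is twice that, 8 * uartclk / baud.
	 */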
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_AMBA;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	if (port->mapbase != (unsigned long) ser->iomem_base)
		ret = -EINVAL;
	return ret;
}

static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.throttle	= pl011_throttle_rx,
	.unthrottle	= pl011_unthrottle_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}

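/*
 * Console write: polls the port directly with local interrupts disabled.
 * The port lock is only trylock'd while an oops is in progress, and is
 * skipped entirely from sysrq context, so a wedged lock cannot prevent
 * crash output from getting out.
 */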
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR, then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * CR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

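/*
 * Recover the current line settings from the hardware. With a 16x
 * sample clock, baud = uartclk / (16 * (IBRD + FBRD/64)), which
 * rearranges to the uartclk * 4 / (64 * IBRD + FBRD) used below;
 * the ST oversampling variants sample at 8x, doubling the result.
 */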
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
				      int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

static int pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

2329*4882a593Smuzhiyun /**
2330*4882a593Smuzhiyun * pl011_console_match - non-standard console matching
2331*4882a593Smuzhiyun * @co: registering console
2332*4882a593Smuzhiyun * @name: name from console command line
2333*4882a593Smuzhiyun * @idx: index from console command line
2334*4882a593Smuzhiyun * @options: ptr to option string from console command line
2335*4882a593Smuzhiyun *
2336*4882a593Smuzhiyun * Only attempts to match console command lines of the form:
2337*4882a593Smuzhiyun * console=pl011,mmio|mmio32,<addr>[,<options>]
2338*4882a593Smuzhiyun * console=pl011,0x<addr>[,<options>]
2339*4882a593Smuzhiyun * This form is used to register an initial earlycon boot console and
2340*4882a593Smuzhiyun * replace it with the amba_console at pl011 driver init.
2341*4882a593Smuzhiyun *
2342*4882a593Smuzhiyun * Performs console setup for a match (as required by interface)
2343*4882a593Smuzhiyun * If no <options> are specified, then assume the h/w is already setup.
2344*4882a593Smuzhiyun *
2345*4882a593Smuzhiyun * Returns 0 if console matches; otherwise non-zero to use default matching
2346*4882a593Smuzhiyun */
static int pl011_console_match(struct console *co, char *name, int idx,
			       char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	/*
	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
	 * have a distinct console name, so make sure we check for that.
	 * The actual work-around for the erratum is applied in the probe
	 * function.
	 */
	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
		return -ENODEV;

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name = "ttyAMA",
	.write = pl011_console_write,
	.device = uart_console_device,
	.setup = pl011_console_setup,
	.match = pl011_console_match,
	.flags = CON_PRINTBUFFER | CON_ANYTIME,
	.index = -1,
	.data = &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

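/*
 * Note: on parts affected by the QDF2400 E44 erratum the FR.BUSY bit is
 * unreliable (it can appear stuck), so this putc variant waits for the
 * TX FIFO to drain (FR.TXFE) instead of polling FR.BUSY the way
 * pl011_putc() below does.
 */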
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s,
				    unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s,
			      unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

#ifdef CONFIG_CONSOLE_POLL
static int pl011_getc(struct uart_port *port)
{
	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	if (port->iotype == UPIO_MEM32)
		return readl(port->membase + UART01x_DR);
	else
		return readb(port->membase + UART01x_DR);
}

static int pl011_early_read(struct console *con, char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;
	int ch, num_read = 0;

	while (num_read < n) {
		ch = pl011_getc(&dev->port);
		if (ch == NO_POLL_CHAR)
			break;

		s[num_read++] = ch;
	}

	return num_read;
}
#else
#define pl011_early_read NULL
#endif

/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line. Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
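/*
 * Example (the address is platform specific; 0x9000000 is the PL011 base
 * on QEMU's arm64 "virt" machine):
 *
 *     earlycon=pl011,0x9000000
 */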
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	device->con->read = pl011_early_read;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);

/*
 * On Qualcomm Datacenter Technologies QDF2400 SoCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>". Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table. In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner = THIS_MODULE,
	.driver_name = "ttyAMA",
	.dev_name = "ttyAMA",
	.major = SERIAL_AMBA_MAJOR,
	.minor = SERIAL_AMBA_MINOR,
	.nr = UART_NR,
	.cons = AMBA_CONSOLE,
};

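/*
 * Device-tree aliases pin ttyAMA numbering. A hypothetical DT fragment
 * such as:
 *
 *     aliases {
 *             serial0 = &uart0;
 *     };
 *
 * maps that UART to ttyAMA0 via the of_alias_get_id() lookup below.
 */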
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

/* also unregisters the driver if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}

static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}

static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}

static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret, i;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
				if (amba_ports[i] == uap)
					amba_ports[i] = NULL;
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}

static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can fail early with a clear error.
	 */
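	/*
	 * Hypothetical DT fragment providing the property read below:
	 *
	 *     current-speed = <115200>;
	 */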
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	uap->port.irq = ret;

#ifdef CONFIG_ACPI_SPCR_TABLE
	if (qdf2400_e44_present) {
		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
		uap->vendor = &vendor_qdt_qdf2400_e44;
	} else
#endif
		uap->vendor = &vendor_sbsa;

	uap->reg_offset = uap->vendor->reg_offset;
	uap->fifosize = 32;
	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}

static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{ "ARMHB000", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe = sbsa_uart_probe,
	.remove = sbsa_uart_remove,
	.driver = {
		.name = "sbsa-uart",
		.pm = &pl011_dev_pm_ops,
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
};

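/*
 * The id/mask pairs below match the AMBA primecell peripheral ID: bits
 * [11:0] carry the part number (0x011 for PL011) and bits [19:12] the
 * designer code (0x41, 'A', for ARM); the ST and ZTE entries use their
 * own vendor encodings.
 */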
static const struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	pr_info("Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");