1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
7 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
8 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12 #include <linux/clk.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/io.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/pagemap.h>
25 #include <linux/platform_device.h>
26 #include <linux/reset.h>
27 #include <linux/serial.h>
28 #include <linux/serial_8250.h>
29 #include <linux/serial_core.h>
30 #include <linux/serial_reg.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/termios.h>
34 #include <linux/tty.h>
35 #include <linux/tty_flip.h>
36
37 #define TEGRA_UART_TYPE "TEGRA_UART"
38 #define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
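/*
 * Note: BYTES_TO_ALIGN() gives how many bytes an address sits past the
 * previous 32-bit boundary; tegra_uart_start_next_tx() sends that many head
 * bytes by PIO so that the following DMA transfer starts word aligned.
 */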
39 #define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
40
41 #define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
42 #define TEGRA_UART_LSR_TXFIFO_FULL 0x100
43 #define TEGRA_UART_IER_EORD 0x20
44 #define TEGRA_UART_MCR_RTS_EN 0x40
45 #define TEGRA_UART_MCR_CTS_EN 0x20
46 #define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
47 UART_LSR_PE | UART_LSR_FE)
48 #define TEGRA_UART_IRDA_CSR 0x08
49 #define TEGRA_UART_SIR_ENABLED 0x80
50
51 #define TEGRA_UART_TX_PIO 1
52 #define TEGRA_UART_TX_DMA 2
53 #define TEGRA_UART_MIN_DMA 16
54 #define TEGRA_UART_FIFO_SIZE 32
55
56 /*
57 * The Tx FIFO trigger level setting in the Tegra UART is encoded the
58 * reverse way from a conventional UART.
59 */
60 #define TEGRA_UART_TX_TRIG_16B 0x00
61 #define TEGRA_UART_TX_TRIG_8B 0x10
62 #define TEGRA_UART_TX_TRIG_4B 0x20
63 #define TEGRA_UART_TX_TRIG_1B 0x30
64
65 #define TEGRA_UART_MAXIMUM 8
66
67 /* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
68 #define TEGRA_UART_DEFAULT_BAUD 115200
69 #define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
70
71 /* Tx transfer mode */
72 #define TEGRA_TX_PIO 1
73 #define TEGRA_TX_DMA 2
74
75 #define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
76
77 /**
78 * tegra_uart_chip_data: SOC specific data.
79 *
80 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
81 * @allow_txfifo_reset_fifo_mode: allow Tx FIFO reset with FIFO mode or not.
82 * Tegra30 does not allow this.
83 * @support_clk_src_div: Clock source supports the clock divider.
84 */
85 struct tegra_uart_chip_data {
86 bool tx_fifo_full_status;
87 bool allow_txfifo_reset_fifo_mode;
88 bool support_clk_src_div;
89 bool fifo_mode_enable_status;
90 int uart_max_port;
91 int max_dma_burst_bytes;
92 int error_tolerance_low_range;
93 int error_tolerance_high_range;
94 };
95
96 struct tegra_baud_tolerance {
97 u32 lower_range_baud;
98 u32 upper_range_baud;
99 s32 tolerance;
100 };
101
102 struct tegra_uart_port {
103 struct uart_port uport;
104 const struct tegra_uart_chip_data *cdata;
105
106 struct clk *uart_clk;
107 struct reset_control *rst;
108 unsigned int current_baud;
109
110 /* Register shadow */
111 unsigned long fcr_shadow;
112 unsigned long mcr_shadow;
113 unsigned long lcr_shadow;
114 unsigned long ier_shadow;
115 bool rts_active;
116
117 int tx_in_progress;
118 unsigned int tx_bytes;
119
120 bool enable_modem_interrupt;
121
122 bool rx_timeout;
123 int rx_in_progress;
124 int symb_bit;
125
126 struct dma_chan *rx_dma_chan;
127 struct dma_chan *tx_dma_chan;
128 dma_addr_t rx_dma_buf_phys;
129 dma_addr_t tx_dma_buf_phys;
130 unsigned char *rx_dma_buf_virt;
131 unsigned char *tx_dma_buf_virt;
132 struct dma_async_tx_descriptor *tx_dma_desc;
133 struct dma_async_tx_descriptor *rx_dma_desc;
134 dma_cookie_t tx_cookie;
135 dma_cookie_t rx_cookie;
136 unsigned int tx_bytes_requested;
137 unsigned int rx_bytes_requested;
138 struct tegra_baud_tolerance *baud_tolerance;
139 int n_adjustable_baud_rates;
140 int required_rate;
141 int configured_rate;
142 bool use_rx_pio;
143 bool use_tx_pio;
144 bool rx_dma_active;
145 };
146
147 static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
148 static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
149 static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
150 bool dma_to_memory);
151
152 static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
153 unsigned long reg)
154 {
155 return readl(tup->uport.membase + (reg << tup->uport.regshift));
156 }
157
158 static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
159 unsigned long reg)
160 {
161 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
162 }
163
164 static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
165 {
166 return container_of(u, struct tegra_uart_port, uport);
167 }
168
169 static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
170 {
171 struct tegra_uart_port *tup = to_tegra_uport(u);
172
173 /*
174 * RI - Ring indicator is active.
175 * CD/DCD/CAR - Carrier detect is always active. For some reason
176 * Linux has different names for carrier detect.
177 * DSR - Data Set Ready is reported active as the hardware doesn't
178 * support it; it is not clear whether Linux uses this yet.
179 * CTS - Clear to send. Always set to active, as the hardware handles
180 * CTS automatically.
181 */
182 if (tup->enable_modem_interrupt)
183 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
184 return TIOCM_CTS;
185 }
186
187 static void set_rts(struct tegra_uart_port *tup, bool active)
188 {
189 unsigned long mcr;
190
191 mcr = tup->mcr_shadow;
192 if (active)
193 mcr |= TEGRA_UART_MCR_RTS_EN;
194 else
195 mcr &= ~TEGRA_UART_MCR_RTS_EN;
196 if (mcr != tup->mcr_shadow) {
197 tegra_uart_write(tup, mcr, UART_MCR);
198 tup->mcr_shadow = mcr;
199 }
200 }
201
202 static void set_dtr(struct tegra_uart_port *tup, bool active)
203 {
204 unsigned long mcr;
205
206 mcr = tup->mcr_shadow;
207 if (active)
208 mcr |= UART_MCR_DTR;
209 else
210 mcr &= ~UART_MCR_DTR;
211 if (mcr != tup->mcr_shadow) {
212 tegra_uart_write(tup, mcr, UART_MCR);
213 tup->mcr_shadow = mcr;
214 }
215 }
216
217 static void set_loopbk(struct tegra_uart_port *tup, bool active)
218 {
219 unsigned long mcr = tup->mcr_shadow;
220
221 if (active)
222 mcr |= UART_MCR_LOOP;
223 else
224 mcr &= ~UART_MCR_LOOP;
225
226 if (mcr != tup->mcr_shadow) {
227 tegra_uart_write(tup, mcr, UART_MCR);
228 tup->mcr_shadow = mcr;
229 }
230 }
231
232 static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
233 {
234 struct tegra_uart_port *tup = to_tegra_uport(u);
235 int enable;
236
237 tup->rts_active = !!(mctrl & TIOCM_RTS);
238 set_rts(tup, tup->rts_active);
239
240 enable = !!(mctrl & TIOCM_DTR);
241 set_dtr(tup, enable);
242
243 enable = !!(mctrl & TIOCM_LOOP);
244 set_loopbk(tup, enable);
245 }
246
247 static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
248 {
249 struct tegra_uart_port *tup = to_tegra_uport(u);
250 unsigned long lcr;
251
252 lcr = tup->lcr_shadow;
253 if (break_ctl)
254 lcr |= UART_LCR_SBC;
255 else
256 lcr &= ~UART_LCR_SBC;
257 tegra_uart_write(tup, lcr, UART_LCR);
258 tup->lcr_shadow = lcr;
259 }
260
261 /**
262 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
263 *
264 * @tup: Tegra serial port data structure.
265 * @cycles: Number of clock periods to wait.
266 *
267 * Tegra UARTs are clocked at 16X the baud/bit rate, so one UART clock
268 * period is 1/16th of a bit time at the current baud rate.
269 */
270 static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
271 unsigned int cycles)
272 {
273 if (tup->current_baud)
274 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
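/* For example, at 115200 baud, 32 cycles -> DIV_ROUND_UP(32 * 1000000, 115200 * 16) = 18 us. */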
275 }
276
277 /* Wait for a symbol-time. */
278 static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
279 unsigned int syms)
280 {
281 if (tup->current_baud)
282 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
283 tup->current_baud));
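/* For example, one 8N1 symbol (symb_bit = 10) at 115200 baud waits about 87 us. */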
284 }
285
286 static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
287 {
288 unsigned long iir;
289 unsigned int tmout = 100;
290
291 do {
292 iir = tegra_uart_read(tup, UART_IIR);
293 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
294 return 0;
295 udelay(1);
296 } while (--tmout);
297
298 return -ETIMEDOUT;
299 }
300
301 static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
302 {
303 unsigned long fcr = tup->fcr_shadow;
304 unsigned int lsr, tmout = 10000;
305
306 if (tup->rts_active)
307 set_rts(tup, false);
308
309 if (tup->cdata->allow_txfifo_reset_fifo_mode) {
310 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
311 tegra_uart_write(tup, fcr, UART_FCR);
312 } else {
313 fcr &= ~UART_FCR_ENABLE_FIFO;
314 tegra_uart_write(tup, fcr, UART_FCR);
315 udelay(60);
316 fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
317 tegra_uart_write(tup, fcr, UART_FCR);
318 fcr |= UART_FCR_ENABLE_FIFO;
319 tegra_uart_write(tup, fcr, UART_FCR);
320 if (tup->cdata->fifo_mode_enable_status)
321 tegra_uart_wait_fifo_mode_enabled(tup);
322 }
323
324 /* Dummy read to ensure the write is posted */
325 tegra_uart_read(tup, UART_SCR);
326
327 /*
328 * For all tegra devices (up to t210), there is a hardware issue that
329 * requires software to wait for 32 UART clock periods for the flush
330 * to propagate, otherwise data could be lost.
331 */
332 tegra_uart_wait_cycle_time(tup, 32);
333
334 do {
335 lsr = tegra_uart_read(tup, UART_LSR);
336 if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
337 break;
338 udelay(1);
339 } while (--tmout);
340
341 if (tup->rts_active)
342 set_rts(tup, true);
343 }
344
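/*
 * Note: the adjustment taken from "nvidia,adjust-baud-rates" is applied in
 * units of 0.01% here; e.g. a tolerance value of 200 raises the requested
 * clock rate by 2%.
 */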
345 static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
346 unsigned int baud, long rate)
347 {
348 int i;
349
350 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
351 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
352 baud <= tup->baud_tolerance[i].upper_range_baud)
353 return (rate + (rate *
354 tup->baud_tolerance[i].tolerance) / 10000);
355 }
356
357 return rate;
358 }
359
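/*
 * Note: diff below is the rate error in units of 0.01%; e.g. requesting
 * 1843200 Hz and getting 1846154 Hz gives diff = 16, i.e. about +0.16%,
 * well within a -4%..+4% tolerance window such as Tegra30's.
 */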
360 static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
361 {
362 long diff;
363
364 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
365 / tup->required_rate;
366 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
367 diff > (tup->cdata->error_tolerance_high_range * 100)) {
368 dev_err(tup->uport.dev,
369 "configured baud rate is out of range by %ld", diff);
370 return -EIO;
371 }
372
373 return 0;
374 }
375
376 static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
377 {
378 unsigned long rate;
379 unsigned int divisor;
380 unsigned long lcr;
381 unsigned long flags;
382 int ret;
383
384 if (tup->current_baud == baud)
385 return 0;
386
387 if (tup->cdata->support_clk_src_div) {
388 rate = baud * 16;
389 tup->required_rate = rate;
390
391 if (tup->n_adjustable_baud_rates)
392 rate = tegra_get_tolerance_rate(tup, baud, rate);
393
394 ret = clk_set_rate(tup->uart_clk, rate);
395 if (ret < 0) {
396 dev_err(tup->uport.dev,
397 "clk_set_rate() failed for rate %lu\n", rate);
398 return ret;
399 }
400 tup->configured_rate = clk_get_rate(tup->uart_clk);
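/* The clock is now running at (or near) 16x the requested baud, so the internal divisor stays at 1. */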
401 divisor = 1;
402 ret = tegra_check_rate_in_range(tup);
403 if (ret < 0)
404 return ret;
405 } else {
406 rate = clk_get_rate(tup->uart_clk);
407 divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
408 }
409
410 spin_lock_irqsave(&tup->uport.lock, flags);
411 lcr = tup->lcr_shadow;
412 lcr |= UART_LCR_DLAB;
413 tegra_uart_write(tup, lcr, UART_LCR);
414
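/*
 * With DLAB set, the UART_TX and UART_IER offsets address the DLL/DLM
 * divisor latch of the 8250-style baud divisor (low byte first).
 */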
415 tegra_uart_write(tup, divisor & 0xFF, UART_TX);
416 tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
417
418 lcr &= ~UART_LCR_DLAB;
419 tegra_uart_write(tup, lcr, UART_LCR);
420
421 /* Dummy read to ensure the write is posted */
422 tegra_uart_read(tup, UART_SCR);
423 spin_unlock_irqrestore(&tup->uport.lock, flags);
424
425 tup->current_baud = baud;
426
427 /* wait two character intervals at new rate */
428 tegra_uart_wait_sym_time(tup, 2);
429 return 0;
430 }
431
432 static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
433 unsigned long lsr)
434 {
435 char flag = TTY_NORMAL;
436
437 if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
438 if (lsr & UART_LSR_OE) {
439 /* Overrun error */
440 flag = TTY_OVERRUN;
441 tup->uport.icount.overrun++;
442 dev_dbg(tup->uport.dev, "Got overrun errors\n");
443 } else if (lsr & UART_LSR_PE) {
444 /* Parity error */
445 flag = TTY_PARITY;
446 tup->uport.icount.parity++;
447 dev_dbg(tup->uport.dev, "Got Parity errors\n");
448 } else if (lsr & UART_LSR_FE) {
449 flag = TTY_FRAME;
450 tup->uport.icount.frame++;
451 dev_dbg(tup->uport.dev, "Got frame errors\n");
452 } else if (lsr & UART_LSR_BI) {
453 /*
454 * Break error
455 * If FIFO read error without any data, reset Rx FIFO
456 */
457 if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
458 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
459 if (tup->uport.ignore_status_mask & UART_LSR_BI)
460 return TTY_BREAK;
461 flag = TTY_BREAK;
462 tup->uport.icount.brk++;
463 dev_dbg(tup->uport.dev, "Got Break\n");
464 }
465 uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
466 }
467
468 return flag;
469 }
470
471 static int tegra_uart_request_port(struct uart_port *u)
472 {
473 return 0;
474 }
475
476 static void tegra_uart_release_port(struct uart_port *u)
477 {
478 /* Nothing to do here */
479 }
480
481 static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
482 {
483 struct circ_buf *xmit = &tup->uport.state->xmit;
484 int i;
485
486 for (i = 0; i < max_bytes; i++) {
487 BUG_ON(uart_circ_empty(xmit));
488 if (tup->cdata->tx_fifo_full_status) {
489 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
490 if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
491 break;
492 }
493 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
494 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
495 tup->uport.icount.tx++;
496 }
497 }
498
499 static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
500 unsigned int bytes)
501 {
502 if (bytes > TEGRA_UART_MIN_DMA)
503 bytes = TEGRA_UART_MIN_DMA;
504
505 tup->tx_in_progress = TEGRA_UART_TX_PIO;
506 tup->tx_bytes = bytes;
507 tup->ier_shadow |= UART_IER_THRI;
508 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
509 }
510
511 static void tegra_uart_tx_dma_complete(void *args)
512 {
513 struct tegra_uart_port *tup = args;
514 struct circ_buf *xmit = &tup->uport.state->xmit;
515 struct dma_tx_state state;
516 unsigned long flags;
517 unsigned int count;
518
519 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
520 count = tup->tx_bytes_requested - state.residue;
521 async_tx_ack(tup->tx_dma_desc);
522 spin_lock_irqsave(&tup->uport.lock, flags);
523 uart_xmit_advance(&tup->uport, count);
524 tup->tx_in_progress = 0;
525 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
526 uart_write_wakeup(&tup->uport);
527 tegra_uart_start_next_tx(tup);
528 spin_unlock_irqrestore(&tup->uport.lock, flags);
529 }
530
531 static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
532 unsigned long count)
533 {
534 struct circ_buf *xmit = &tup->uport.state->xmit;
535 dma_addr_t tx_phys_addr;
536
537 tup->tx_bytes = count & ~(0xF);
538 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
539
540 dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
541 tup->tx_bytes, DMA_TO_DEVICE);
542
543 tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
544 tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
545 DMA_PREP_INTERRUPT);
546 if (!tup->tx_dma_desc) {
547 dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
548 return -EIO;
549 }
550
551 tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
552 tup->tx_dma_desc->callback_param = tup;
553 tup->tx_in_progress = TEGRA_UART_TX_DMA;
554 tup->tx_bytes_requested = tup->tx_bytes;
555 tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
556 dma_async_issue_pending(tup->tx_dma_chan);
557 return 0;
558 }
559
560 static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
561 {
562 unsigned long tail;
563 unsigned long count;
564 struct circ_buf *xmit = &tup->uport.state->xmit;
565
566 if (!tup->current_baud)
567 return;
568
569 tail = (unsigned long)&xmit->buf[xmit->tail];
570 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
571 if (!count)
572 return;
573
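/*
 * Small or forced-PIO transfers go through the FIFO directly; otherwise any
 * bytes before the next 4-byte boundary are sent by PIO first so that the
 * DMA transfer started below is word aligned.
 */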
574 if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
575 tegra_uart_start_pio_tx(tup, count);
576 else if (BYTES_TO_ALIGN(tail) > 0)
577 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
578 else
579 tegra_uart_start_tx_dma(tup, count);
580 }
581
582 /* Called by serial core driver with u->lock taken. */
583 static void tegra_uart_start_tx(struct uart_port *u)
584 {
585 struct tegra_uart_port *tup = to_tegra_uport(u);
586 struct circ_buf *xmit = &u->state->xmit;
587
588 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
589 tegra_uart_start_next_tx(tup);
590 }
591
592 static unsigned int tegra_uart_tx_empty(struct uart_port *u)
593 {
594 struct tegra_uart_port *tup = to_tegra_uport(u);
595 unsigned int ret = 0;
596 unsigned long flags;
597
598 spin_lock_irqsave(&u->lock, flags);
599 if (!tup->tx_in_progress) {
600 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
601 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
602 ret = TIOCSER_TEMT;
603 }
604 spin_unlock_irqrestore(&u->lock, flags);
605 return ret;
606 }
607
608 static void tegra_uart_stop_tx(struct uart_port *u)
609 {
610 struct tegra_uart_port *tup = to_tegra_uport(u);
611 struct dma_tx_state state;
612 unsigned int count;
613
614 if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
615 return;
616
617 dmaengine_terminate_all(tup->tx_dma_chan);
618 dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
619 count = tup->tx_bytes_requested - state.residue;
620 async_tx_ack(tup->tx_dma_desc);
621 uart_xmit_advance(&tup->uport, count);
622 tup->tx_in_progress = 0;
623 }
624
625 static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
626 {
627 struct circ_buf *xmit = &tup->uport.state->xmit;
628
629 tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
630 tup->tx_in_progress = 0;
631 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
632 uart_write_wakeup(&tup->uport);
633 tegra_uart_start_next_tx(tup);
634 }
635
636 static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
637 struct tty_port *port)
638 {
639 do {
640 char flag = TTY_NORMAL;
641 unsigned long lsr = 0;
642 unsigned char ch;
643
644 lsr = tegra_uart_read(tup, UART_LSR);
645 if (!(lsr & UART_LSR_DR))
646 break;
647
648 flag = tegra_uart_decode_rx_error(tup, lsr);
649 if (flag != TTY_NORMAL)
650 continue;
651
652 ch = (unsigned char) tegra_uart_read(tup, UART_RX);
653 tup->uport.icount.rx++;
654
655 if (uart_handle_sysrq_char(&tup->uport, ch))
656 continue;
657
658 if (tup->uport.ignore_status_mask & UART_LSR_DR)
659 continue;
660
661 tty_insert_flip_char(port, ch, flag);
662 } while (1);
663 }
664
665 static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
666 struct tty_port *port,
667 unsigned int count)
668 {
669 int copied;
670
671 /* If count is zero, then there is no data to be copied */
672 if (!count)
673 return;
674
675 tup->uport.icount.rx += count;
676
677 if (tup->uport.ignore_status_mask & UART_LSR_DR)
678 return;
679
680 dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
681 count, DMA_FROM_DEVICE);
682 copied = tty_insert_flip_string(port,
683 ((unsigned char *)(tup->rx_dma_buf_virt)), count);
684 if (copied != count) {
685 WARN_ON(1);
686 dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
687 }
688 dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
689 count, DMA_TO_DEVICE);
690 }
691
692 static void do_handle_rx_pio(struct tegra_uart_port *tup)
693 {
694 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
695 struct tty_port *port = &tup->uport.state->port;
696
697 tegra_uart_handle_rx_pio(tup, port);
698 if (tty) {
699 tty_flip_buffer_push(port);
700 tty_kref_put(tty);
701 }
702 }
703
704 static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
705 unsigned int residue)
706 {
707 struct tty_port *port = &tup->uport.state->port;
708 unsigned int count;
709
710 async_tx_ack(tup->rx_dma_desc);
711 count = tup->rx_bytes_requested - residue;
712
713 /* If we are here, DMA is stopped */
714 tegra_uart_copy_rx_to_tty(tup, port, count);
715
716 do_handle_rx_pio(tup);
717 }
718
719 static void tegra_uart_rx_dma_complete(void *args)
720 {
721 struct tegra_uart_port *tup = args;
722 struct uart_port *u = &tup->uport;
723 unsigned long flags;
724 struct dma_tx_state state;
725 enum dma_status status;
726
727 spin_lock_irqsave(&u->lock, flags);
728
729 status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
730
731 if (status == DMA_IN_PROGRESS) {
732 dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
733 goto done;
734 }
735
736 /* Deactivate flow control to stop sender */
737 if (tup->rts_active)
738 set_rts(tup, false);
739
740 tup->rx_dma_active = false;
741 tegra_uart_rx_buffer_push(tup, 0);
742 tegra_uart_start_rx_dma(tup);
743
744 /* Activate flow control to start transfer */
745 if (tup->rts_active)
746 set_rts(tup, true);
747
748 done:
749 spin_unlock_irqrestore(&u->lock, flags);
750 }
751
752 static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
753 {
754 struct dma_tx_state state;
755
756 if (!tup->rx_dma_active) {
757 do_handle_rx_pio(tup);
758 return;
759 }
760
761 dmaengine_terminate_all(tup->rx_dma_chan);
762 dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
763
764 tegra_uart_rx_buffer_push(tup, state.residue);
765 tup->rx_dma_active = false;
766 }
767
768 static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
769 {
770 /* Deactivate flow control to stop sender */
771 if (tup->rts_active)
772 set_rts(tup, false);
773
774 tegra_uart_terminate_rx_dma(tup);
775
776 if (tup->rts_active)
777 set_rts(tup, true);
778 }
779
780 static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
781 {
782 unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
783
784 if (tup->rx_dma_active)
785 return 0;
786
787 tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
788 tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
789 DMA_PREP_INTERRUPT);
790 if (!tup->rx_dma_desc) {
791 dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
792 return -EIO;
793 }
794
795 tup->rx_dma_active = true;
796 tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
797 tup->rx_dma_desc->callback_param = tup;
798 tup->rx_bytes_requested = count;
799 tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
800 dma_async_issue_pending(tup->rx_dma_chan);
801 return 0;
802 }
803
804 static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
805 {
806 struct tegra_uart_port *tup = to_tegra_uport(u);
807 unsigned long msr;
808
809 msr = tegra_uart_read(tup, UART_MSR);
810 if (!(msr & UART_MSR_ANY_DELTA))
811 return;
812
813 if (msr & UART_MSR_TERI)
814 tup->uport.icount.rng++;
815 if (msr & UART_MSR_DDSR)
816 tup->uport.icount.dsr++;
817 /* We may only get DDCD during HW init and reset */
818 if (msr & UART_MSR_DDCD)
819 uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
820 /* Will start/stop_tx accordingly */
821 if (msr & UART_MSR_DCTS)
822 uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
823 }
824
825 static irqreturn_t tegra_uart_isr(int irq, void *data)
826 {
827 struct tegra_uart_port *tup = data;
828 struct uart_port *u = &tup->uport;
829 unsigned long iir;
830 unsigned long ier;
831 bool is_rx_start = false;
832 bool is_rx_int = false;
833 unsigned long flags;
834
835 spin_lock_irqsave(&u->lock, flags);
836 while (1) {
837 iir = tegra_uart_read(tup, UART_IIR);
838 if (iir & UART_IIR_NO_INT) {
839 if (!tup->use_rx_pio && is_rx_int) {
840 tegra_uart_handle_rx_dma(tup);
841 if (tup->rx_in_progress) {
842 ier = tup->ier_shadow;
843 ier |= (UART_IER_RLSI | UART_IER_RTOIE |
844 TEGRA_UART_IER_EORD | UART_IER_RDI);
845 tup->ier_shadow = ier;
846 tegra_uart_write(tup, ier, UART_IER);
847 }
848 } else if (is_rx_start) {
849 tegra_uart_start_rx_dma(tup);
850 }
851 spin_unlock_irqrestore(&u->lock, flags);
852 return IRQ_HANDLED;
853 }
854
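/*
 * Bits 3:1 of IIR hold the interrupt ID. IDs 0-3 and 6 match the usual
 * 16550 sources; ID 4 (end of received data) is Tegra specific and pairs
 * with TEGRA_UART_IER_EORD.
 */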
855 switch ((iir >> 1) & 0x7) {
856 case 0: /* Modem signal change interrupt */
857 tegra_uart_handle_modem_signal_change(u);
858 break;
859
860 case 1: /* Transmit interrupt only triggered when using PIO */
861 tup->ier_shadow &= ~UART_IER_THRI;
862 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
863 tegra_uart_handle_tx_pio(tup);
864 break;
865
866 case 4: /* End of data */
867 case 6: /* Rx timeout */
868 if (!tup->use_rx_pio) {
869 is_rx_int = tup->rx_in_progress;
870 /* Disable Rx interrupts */
871 ier = tup->ier_shadow;
872 ier &= ~(UART_IER_RDI | UART_IER_RLSI |
873 UART_IER_RTOIE | TEGRA_UART_IER_EORD);
874 tup->ier_shadow = ier;
875 tegra_uart_write(tup, ier, UART_IER);
876 break;
877 }
878 fallthrough;
879 case 2: /* Receive */
880 if (!tup->use_rx_pio) {
881 is_rx_start = tup->rx_in_progress;
882 tup->ier_shadow &= ~UART_IER_RDI;
883 tegra_uart_write(tup, tup->ier_shadow,
884 UART_IER);
885 } else {
886 do_handle_rx_pio(tup);
887 }
888 break;
889
890 case 3: /* Receive error */
891 tegra_uart_decode_rx_error(tup,
892 tegra_uart_read(tup, UART_LSR));
893 break;
894
895 case 5: /* break - nothing to handle */
896 case 7: /* break - nothing to handle */
897 break;
898 }
899 }
900 }
901
902 static void tegra_uart_stop_rx(struct uart_port *u)
903 {
904 struct tegra_uart_port *tup = to_tegra_uport(u);
905 struct tty_port *port = &tup->uport.state->port;
906 unsigned long ier;
907
908 if (tup->rts_active)
909 set_rts(tup, false);
910
911 if (!tup->rx_in_progress)
912 return;
913
914 tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
915
916 ier = tup->ier_shadow;
917 ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
918 TEGRA_UART_IER_EORD);
919 tup->ier_shadow = ier;
920 tegra_uart_write(tup, ier, UART_IER);
921 tup->rx_in_progress = 0;
922
923 if (!tup->use_rx_pio)
924 tegra_uart_terminate_rx_dma(tup);
925 else
926 tegra_uart_handle_rx_pio(tup, port);
927 }
928
929 static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
930 {
931 unsigned long flags;
932 unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
933 unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
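/*
 * char_time assumes a 10-bit frame: e.g. ~87 us per character at 115200
 * baud, so draining a full 32-byte FIFO is bounded at roughly 2.8 ms.
 */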
934 unsigned long wait_time;
935 unsigned long lsr;
936 unsigned long msr;
937 unsigned long mcr;
938
939 /* Disable interrupts */
940 tegra_uart_write(tup, 0, UART_IER);
941
942 lsr = tegra_uart_read(tup, UART_LSR);
943 if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
944 msr = tegra_uart_read(tup, UART_MSR);
945 mcr = tegra_uart_read(tup, UART_MCR);
946 if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
947 dev_err(tup->uport.dev,
948 "Tx Fifo not empty, CTS disabled, waiting\n");
949
950 /* Wait for Tx fifo to be empty */
951 while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
952 wait_time = min(fifo_empty_time, 100lu);
953 udelay(wait_time);
954 fifo_empty_time -= wait_time;
955 if (!fifo_empty_time) {
956 msr = tegra_uart_read(tup, UART_MSR);
957 mcr = tegra_uart_read(tup, UART_MCR);
958 if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
959 (msr & UART_MSR_CTS))
960 dev_err(tup->uport.dev,
961 "Slave not ready\n");
962 break;
963 }
964 lsr = tegra_uart_read(tup, UART_LSR);
965 }
966 }
967
968 spin_lock_irqsave(&tup->uport.lock, flags);
969 /* Reset the Rx and Tx FIFOs */
970 tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
971 tup->current_baud = 0;
972 spin_unlock_irqrestore(&tup->uport.lock, flags);
973
974 tup->rx_in_progress = 0;
975 tup->tx_in_progress = 0;
976
977 if (!tup->use_rx_pio)
978 tegra_uart_dma_channel_free(tup, true);
979 if (!tup->use_tx_pio)
980 tegra_uart_dma_channel_free(tup, false);
981
982 clk_disable_unprepare(tup->uart_clk);
983 }
984
985 static int tegra_uart_hw_init(struct tegra_uart_port *tup)
986 {
987 int ret;
988
989 tup->fcr_shadow = 0;
990 tup->mcr_shadow = 0;
991 tup->lcr_shadow = 0;
992 tup->ier_shadow = 0;
993 tup->current_baud = 0;
994
995 clk_prepare_enable(tup->uart_clk);
996
997 /* Reset the UART controller to clear all previous status.*/
998 reset_control_assert(tup->rst);
999 udelay(10);
1000 reset_control_deassert(tup->rst);
1001
1002 tup->rx_in_progress = 0;
1003 tup->tx_in_progress = 0;
1004
1005 /*
1006 * Set the trigger level
1007 *
1008 * For PIO mode:
1009 *
1010 * For receive, this will interrupt the CPU after that many bytes have
1011 * been received; the remaining bytes are signalled by the receive timeout
1012 * interrupt. The Rx high watermark is set to 4.
1013 *
1014 * For transmit, if the transmit interrupt is enabled, this will
1015 * interrupt the CPU when the number of entries in the FIFO reaches the
1016 * low watermark. Tx low watermark is set to 16 bytes.
1017 *
1018 * For DMA mode:
1019 *
1020 * Set the Tx trigger to 16. This should match the DMA burst size that
1021 * is programmed in the DMA registers.
1022 */
1023 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
1024
1025 if (tup->use_rx_pio) {
1026 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1027 } else {
1028 if (tup->cdata->max_dma_burst_bytes == 8)
1029 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1030 else
1031 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1032 }
1033
1034 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1035 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1036
1037 /* Dummy read to ensure the write is posted */
1038 tegra_uart_read(tup, UART_SCR);
1039
1040 if (tup->cdata->fifo_mode_enable_status) {
1041 ret = tegra_uart_wait_fifo_mode_enabled(tup);
1042 if (ret < 0) {
1043 dev_err(tup->uport.dev,
1044 "Failed to enable FIFO mode: %d\n", ret);
1045 return ret;
1046 }
1047 } else {
1048 /*
1049 * For all tegra devices (up to t210), there is a hardware
1050 * issue that requires software to wait for 3 UART clock
1051 * periods after enabling the TX fifo, otherwise data could
1052 * be lost.
1053 */
1054 tegra_uart_wait_cycle_time(tup, 3);
1055 }
1056
1057 /*
1058 * Initialize the UART with default configuration
1059 * (115200, N, 8, 1) so that the receive DMA buffer may be
1060 * enqueued
1061 */
1062 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
1063 if (ret < 0) {
1064 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1065 return ret;
1066 }
1067 if (!tup->use_rx_pio) {
1068 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1069 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1070 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1071 } else {
1072 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1073 }
1074 tup->rx_in_progress = 1;
1075
1076 /*
1077 * Enable IE_RXS for the receive status interrupts like line errors.
1078 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
1079 *
1080 * EORD is a different interrupt from RX_TIMEOUT - RX_TIMEOUT occurs when
1081 * the data is sitting in the FIFO and couldn't be transferred to the
1082 * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
1083 * triggered when the incoming data stream pauses for the length of 4
1084 * characters.
1085 *
1086 * For a pause in data that is not aligned to 4 bytes, we get
1087 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first,
1088 * then the EORD.
1089 */
1090 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1091
1092 /*
1093 * If using DMA mode, enable EORD interrupt to notify about RX
1094 * completion.
1095 */
1096 if (!tup->use_rx_pio)
1097 tup->ier_shadow |= TEGRA_UART_IER_EORD;
1098
1099 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1100 return 0;
1101 }
1102
1103 static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1104 bool dma_to_memory)
1105 {
1106 if (dma_to_memory) {
1107 dmaengine_terminate_all(tup->rx_dma_chan);
1108 dma_release_channel(tup->rx_dma_chan);
1109 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1110 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1111 tup->rx_dma_chan = NULL;
1112 tup->rx_dma_buf_phys = 0;
1113 tup->rx_dma_buf_virt = NULL;
1114 } else {
1115 dmaengine_terminate_all(tup->tx_dma_chan);
1116 dma_release_channel(tup->tx_dma_chan);
1117 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1118 UART_XMIT_SIZE, DMA_TO_DEVICE);
1119 tup->tx_dma_chan = NULL;
1120 tup->tx_dma_buf_phys = 0;
1121 tup->tx_dma_buf_virt = NULL;
1122 }
1123 }
1124
1125 static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1126 bool dma_to_memory)
1127 {
1128 struct dma_chan *dma_chan;
1129 unsigned char *dma_buf;
1130 dma_addr_t dma_phys;
1131 int ret;
1132 struct dma_slave_config dma_sconfig;
1133
1134 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
1135 if (IS_ERR(dma_chan)) {
1136 ret = PTR_ERR(dma_chan);
1137 dev_err(tup->uport.dev,
1138 "DMA channel alloc failed: %d\n", ret);
1139 return ret;
1140 }
1141
1142 if (dma_to_memory) {
1143 dma_buf = dma_alloc_coherent(tup->uport.dev,
1144 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1145 &dma_phys, GFP_KERNEL);
1146 if (!dma_buf) {
1147 dev_err(tup->uport.dev,
1148 "Not able to allocate the dma buffer\n");
1149 dma_release_channel(dma_chan);
1150 return -ENOMEM;
1151 }
1152 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1153 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1154 DMA_TO_DEVICE);
1155 dma_sconfig.src_addr = tup->uport.mapbase;
1156 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1157 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
1158 tup->rx_dma_chan = dma_chan;
1159 tup->rx_dma_buf_virt = dma_buf;
1160 tup->rx_dma_buf_phys = dma_phys;
1161 } else {
1162 dma_phys = dma_map_single(tup->uport.dev,
1163 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1164 DMA_TO_DEVICE);
1165 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1166 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1167 dma_release_channel(dma_chan);
1168 return -ENOMEM;
1169 }
1170 dma_buf = tup->uport.state->xmit.buf;
1171 dma_sconfig.dst_addr = tup->uport.mapbase;
1172 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1173 dma_sconfig.dst_maxburst = 16;
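/* A 16-byte TX burst matches the TEGRA_UART_TX_TRIG_16B trigger programmed in tegra_uart_hw_init(). */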
1174 tup->tx_dma_chan = dma_chan;
1175 tup->tx_dma_buf_virt = dma_buf;
1176 tup->tx_dma_buf_phys = dma_phys;
1177 }
1178
1179 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1180 if (ret < 0) {
1181 dev_err(tup->uport.dev,
1182 "Dma slave config failed, err = %d\n", ret);
1183 tegra_uart_dma_channel_free(tup, dma_to_memory);
1184 return ret;
1185 }
1186
1187 return 0;
1188 }
1189
1190 static int tegra_uart_startup(struct uart_port *u)
1191 {
1192 struct tegra_uart_port *tup = to_tegra_uport(u);
1193 int ret;
1194
1195 if (!tup->use_tx_pio) {
1196 ret = tegra_uart_dma_channel_allocate(tup, false);
1197 if (ret < 0) {
1198 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1199 ret);
1200 return ret;
1201 }
1202 }
1203
1204 if (!tup->use_rx_pio) {
1205 ret = tegra_uart_dma_channel_allocate(tup, true);
1206 if (ret < 0) {
1207 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1208 ret);
1209 goto fail_rx_dma;
1210 }
1211 }
1212
1213 ret = tegra_uart_hw_init(tup);
1214 if (ret < 0) {
1215 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1216 goto fail_hw_init;
1217 }
1218
1219 ret = request_irq(u->irq, tegra_uart_isr, 0,
1220 dev_name(u->dev), tup);
1221 if (ret < 0) {
1222 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1223 goto fail_hw_init;
1224 }
1225 return 0;
1226
1227 fail_hw_init:
1228 if (!tup->use_rx_pio)
1229 tegra_uart_dma_channel_free(tup, true);
1230 fail_rx_dma:
1231 if (!tup->use_tx_pio)
1232 tegra_uart_dma_channel_free(tup, false);
1233 return ret;
1234 }
1235
1236 /*
1237 * Flush any TX data submitted for DMA and PIO. Called when the
1238 * TX circular buffer is reset.
1239 */
1240 static void tegra_uart_flush_buffer(struct uart_port *u)
1241 {
1242 struct tegra_uart_port *tup = to_tegra_uport(u);
1243
1244 tup->tx_bytes = 0;
1245 if (tup->tx_dma_chan)
1246 dmaengine_terminate_all(tup->tx_dma_chan);
1247 }
1248
1249 static void tegra_uart_shutdown(struct uart_port *u)
1250 {
1251 struct tegra_uart_port *tup = to_tegra_uport(u);
1252
1253 tegra_uart_hw_deinit(tup);
1254 free_irq(u->irq, tup);
1255 }
1256
1257 static void tegra_uart_enable_ms(struct uart_port *u)
1258 {
1259 struct tegra_uart_port *tup = to_tegra_uport(u);
1260
1261 if (tup->enable_modem_interrupt) {
1262 tup->ier_shadow |= UART_IER_MSI;
1263 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1264 }
1265 }
1266
1267 static void tegra_uart_set_termios(struct uart_port *u,
1268 struct ktermios *termios, struct ktermios *oldtermios)
1269 {
1270 struct tegra_uart_port *tup = to_tegra_uport(u);
1271 unsigned int baud;
1272 unsigned long flags;
1273 unsigned int lcr;
1274 int symb_bit = 1;
1275 struct clk *parent_clk = clk_get_parent(tup->uart_clk);
1276 unsigned long parent_clk_rate = clk_get_rate(parent_clk);
1277 int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
1278 int ret;
1279
1280 max_divider *= 16;
1281 spin_lock_irqsave(&u->lock, flags);
1282
1283 /* Changing configuration, it is safe to stop any rx now */
1284 if (tup->rts_active)
1285 set_rts(tup, false);
1286
1287 /* Clear all interrupts as configuration is going to be changed */
1288 tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
1289 tegra_uart_read(tup, UART_IER);
1290 tegra_uart_write(tup, 0, UART_IER);
1291 tegra_uart_read(tup, UART_IER);
1292
1293 /* Parity */
1294 lcr = tup->lcr_shadow;
1295 lcr &= ~UART_LCR_PARITY;
1296
1297 /* CMSPAR isn't supported by this driver */
1298 termios->c_cflag &= ~CMSPAR;
1299
1300 if ((termios->c_cflag & PARENB) == PARENB) {
1301 symb_bit++;
1302 if (termios->c_cflag & PARODD) {
1303 lcr |= UART_LCR_PARITY;
1304 lcr &= ~UART_LCR_EPAR;
1305 lcr &= ~UART_LCR_SPAR;
1306 } else {
1307 lcr |= UART_LCR_PARITY;
1308 lcr |= UART_LCR_EPAR;
1309 lcr &= ~UART_LCR_SPAR;
1310 }
1311 }
1312
1313 lcr &= ~UART_LCR_WLEN8;
1314 switch (termios->c_cflag & CSIZE) {
1315 case CS5:
1316 lcr |= UART_LCR_WLEN5;
1317 symb_bit += 5;
1318 break;
1319 case CS6:
1320 lcr |= UART_LCR_WLEN6;
1321 symb_bit += 6;
1322 break;
1323 case CS7:
1324 lcr |= UART_LCR_WLEN7;
1325 symb_bit += 7;
1326 break;
1327 default:
1328 lcr |= UART_LCR_WLEN8;
1329 symb_bit += 8;
1330 break;
1331 }
1332
1333 /* Stop bits */
1334 if (termios->c_cflag & CSTOPB) {
1335 lcr |= UART_LCR_STOP;
1336 symb_bit += 2;
1337 } else {
1338 lcr &= ~UART_LCR_STOP;
1339 symb_bit++;
1340 }
1341
1342 tegra_uart_write(tup, lcr, UART_LCR);
1343 tup->lcr_shadow = lcr;
1344 tup->symb_bit = symb_bit;
1345
1346 /* Baud rate. */
1347 baud = uart_get_baud_rate(u, termios, oldtermios,
1348 parent_clk_rate/max_divider,
1349 parent_clk_rate/16);
1350 spin_unlock_irqrestore(&u->lock, flags);
1351 ret = tegra_set_baudrate(tup, baud);
1352 if (ret < 0) {
1353 dev_err(tup->uport.dev, "Failed to set baud rate\n");
1354 return;
1355 }
1356 if (tty_termios_baud_rate(termios))
1357 tty_termios_encode_baud_rate(termios, baud, baud);
1358 spin_lock_irqsave(&u->lock, flags);
1359
1360 /* Flow control */
1361 if (termios->c_cflag & CRTSCTS) {
1362 tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
1363 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1364 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1365 /* if top layer has asked to set rts active then do so here */
1366 if (tup->rts_active)
1367 set_rts(tup, true);
1368 } else {
1369 tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
1370 tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
1371 tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
1372 }
1373
1374 /* update the port timeout based on new settings */
1375 uart_update_timeout(u, termios->c_cflag, baud);
1376
1377 /* Make sure all writes have completed */
1378 tegra_uart_read(tup, UART_IER);
1379
1380 /* Re-enable interrupt */
1381 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1382 tegra_uart_read(tup, UART_IER);
1383
1384 tup->uport.ignore_status_mask = 0;
1385 /* Ignore all characters if CREAD is not set */
1386 if ((termios->c_cflag & CREAD) == 0)
1387 tup->uport.ignore_status_mask |= UART_LSR_DR;
1388 if (termios->c_iflag & IGNBRK)
1389 tup->uport.ignore_status_mask |= UART_LSR_BI;
1390
1391 spin_unlock_irqrestore(&u->lock, flags);
1392 }
1393
1394 static const char *tegra_uart_type(struct uart_port *u)
1395 {
1396 return TEGRA_UART_TYPE;
1397 }
1398
1399 static const struct uart_ops tegra_uart_ops = {
1400 .tx_empty = tegra_uart_tx_empty,
1401 .set_mctrl = tegra_uart_set_mctrl,
1402 .get_mctrl = tegra_uart_get_mctrl,
1403 .stop_tx = tegra_uart_stop_tx,
1404 .start_tx = tegra_uart_start_tx,
1405 .stop_rx = tegra_uart_stop_rx,
1406 .flush_buffer = tegra_uart_flush_buffer,
1407 .enable_ms = tegra_uart_enable_ms,
1408 .break_ctl = tegra_uart_break_ctl,
1409 .startup = tegra_uart_startup,
1410 .shutdown = tegra_uart_shutdown,
1411 .set_termios = tegra_uart_set_termios,
1412 .type = tegra_uart_type,
1413 .request_port = tegra_uart_request_port,
1414 .release_port = tegra_uart_release_port,
1415 };
1416
1417 static struct uart_driver tegra_uart_driver = {
1418 .owner = THIS_MODULE,
1419 .driver_name = "tegra_hsuart",
1420 .dev_name = "ttyTHS",
1421 .cons = NULL,
1422 .nr = TEGRA_UART_MAXIMUM,
1423 };
1424
1425 static int tegra_uart_parse_dt(struct platform_device *pdev,
1426 struct tegra_uart_port *tup)
1427 {
1428 struct device_node *np = pdev->dev.of_node;
1429 int port;
1430 int ret;
1431 int index;
1432 u32 pval;
1433 int count;
1434 int n_entries;
1435
1436 port = of_alias_get_id(np, "serial");
1437 if (port < 0) {
1438 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1439 return port;
1440 }
1441 tup->uport.line = port;
1442
1443 tup->enable_modem_interrupt = of_property_read_bool(np,
1444 "nvidia,enable-modem-interrupt");
1445
1446 index = of_property_match_string(np, "dma-names", "rx");
1447 if (index < 0) {
1448 tup->use_rx_pio = true;
1449 dev_info(&pdev->dev, "RX in PIO mode\n");
1450 }
1451 index = of_property_match_string(np, "dma-names", "tx");
1452 if (index < 0) {
1453 tup->use_tx_pio = true;
1454 dev_info(&pdev->dev, "TX in PIO mode\n");
1455 }
1456
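/*
 * "nvidia,adjust-baud-rates" is consumed as triplets of
 * <lower-baud upper-baud adjustment>, where the adjustment is applied by
 * tegra_get_tolerance_rate() in units of 0.01%.
 */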
1457 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1458 if (n_entries > 0) {
1459 tup->n_adjustable_baud_rates = n_entries / 3;
1460 tup->baud_tolerance =
1461 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1462 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1463 if (!tup->baud_tolerance)
1464 return -ENOMEM;
1465 for (count = 0, index = 0; count < n_entries; count += 3,
1466 index++) {
1467 ret =
1468 of_property_read_u32_index(np,
1469 "nvidia,adjust-baud-rates",
1470 count, &pval);
1471 if (!ret)
1472 tup->baud_tolerance[index].lower_range_baud =
1473 pval;
1474 ret =
1475 of_property_read_u32_index(np,
1476 "nvidia,adjust-baud-rates",
1477 count + 1, &pval);
1478 if (!ret)
1479 tup->baud_tolerance[index].upper_range_baud =
1480 pval;
1481 ret =
1482 of_property_read_u32_index(np,
1483 "nvidia,adjust-baud-rates",
1484 count + 2, &pval);
1485 if (!ret)
1486 tup->baud_tolerance[index].tolerance =
1487 (s32)pval;
1488 }
1489 } else {
1490 tup->n_adjustable_baud_rates = 0;
1491 }
1492
1493 return 0;
1494 }
1495
1496 static struct tegra_uart_chip_data tegra20_uart_chip_data = {
1497 .tx_fifo_full_status = false,
1498 .allow_txfifo_reset_fifo_mode = true,
1499 .support_clk_src_div = false,
1500 .fifo_mode_enable_status = false,
1501 .uart_max_port = 5,
1502 .max_dma_burst_bytes = 4,
1503 .error_tolerance_low_range = -4,
1504 .error_tolerance_high_range = 4,
1505 };
1506
1507 static struct tegra_uart_chip_data tegra30_uart_chip_data = {
1508 .tx_fifo_full_status = true,
1509 .allow_txfifo_reset_fifo_mode = false,
1510 .support_clk_src_div = true,
1511 .fifo_mode_enable_status = false,
1512 .uart_max_port = 5,
1513 .max_dma_burst_bytes = 4,
1514 .error_tolerance_low_range = -4,
1515 .error_tolerance_high_range = 4,
1516 };
1517
1518 static struct tegra_uart_chip_data tegra186_uart_chip_data = {
1519 .tx_fifo_full_status = true,
1520 .allow_txfifo_reset_fifo_mode = false,
1521 .support_clk_src_div = true,
1522 .fifo_mode_enable_status = true,
1523 .uart_max_port = 8,
1524 .max_dma_burst_bytes = 8,
1525 .error_tolerance_low_range = 0,
1526 .error_tolerance_high_range = 4,
1527 };
1528
1529 static struct tegra_uart_chip_data tegra194_uart_chip_data = {
1530 .tx_fifo_full_status = true,
1531 .allow_txfifo_reset_fifo_mode = false,
1532 .support_clk_src_div = true,
1533 .fifo_mode_enable_status = true,
1534 .uart_max_port = 8,
1535 .max_dma_burst_bytes = 8,
1536 .error_tolerance_low_range = -2,
1537 .error_tolerance_high_range = 2,
1538 };
1539
1540 static const struct of_device_id tegra_uart_of_match[] = {
1541 {
1542 .compatible = "nvidia,tegra30-hsuart",
1543 .data = &tegra30_uart_chip_data,
1544 }, {
1545 .compatible = "nvidia,tegra20-hsuart",
1546 .data = &tegra20_uart_chip_data,
1547 }, {
1548 .compatible = "nvidia,tegra186-hsuart",
1549 .data = &tegra186_uart_chip_data,
1550 }, {
1551 .compatible = "nvidia,tegra194-hsuart",
1552 .data = &tegra194_uart_chip_data,
1553 }, {
1554 },
1555 };
1556 MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1557
1558 static int tegra_uart_probe(struct platform_device *pdev)
1559 {
1560 struct tegra_uart_port *tup;
1561 struct uart_port *u;
1562 struct resource *resource;
1563 int ret;
1564 const struct tegra_uart_chip_data *cdata;
1565 const struct of_device_id *match;
1566
1567 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1568 if (!match) {
1569 dev_err(&pdev->dev, "Error: No device match found\n");
1570 return -ENODEV;
1571 }
1572 cdata = match->data;
1573
1574 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1575 if (!tup) {
1576 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1577 return -ENOMEM;
1578 }
1579
1580 ret = tegra_uart_parse_dt(pdev, tup);
1581 if (ret < 0)
1582 return ret;
1583
1584 u = &tup->uport;
1585 u->dev = &pdev->dev;
1586 u->ops = &tegra_uart_ops;
1587 u->type = PORT_TEGRA;
1588 u->fifosize = 32;
1589 tup->cdata = cdata;
1590
1591 platform_set_drvdata(pdev, tup);
1592 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1593 if (!resource) {
1594 dev_err(&pdev->dev, "No IO memory resource\n");
1595 return -ENODEV;
1596 }
1597
1598 u->mapbase = resource->start;
1599 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1600 if (IS_ERR(u->membase))
1601 return PTR_ERR(u->membase);
1602
1603 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1604 if (IS_ERR(tup->uart_clk)) {
1605 dev_err(&pdev->dev, "Couldn't get the clock\n");
1606 return PTR_ERR(tup->uart_clk);
1607 }
1608
1609 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1610 if (IS_ERR(tup->rst)) {
1611 dev_err(&pdev->dev, "Couldn't get the reset\n");
1612 return PTR_ERR(tup->rst);
1613 }
1614
1615 u->iotype = UPIO_MEM32;
1616 ret = platform_get_irq(pdev, 0);
1617 if (ret < 0)
1618 return ret;
1619 u->irq = ret;
1620 u->regshift = 2;
1621 ret = uart_add_one_port(&tegra_uart_driver, u);
1622 if (ret < 0) {
1623 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1624 return ret;
1625 }
1626 return ret;
1627 }
1628
1629 static int tegra_uart_remove(struct platform_device *pdev)
1630 {
1631 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1632 struct uart_port *u = &tup->uport;
1633
1634 uart_remove_one_port(&tegra_uart_driver, u);
1635 return 0;
1636 }
1637
1638 #ifdef CONFIG_PM_SLEEP
1639 static int tegra_uart_suspend(struct device *dev)
1640 {
1641 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1642 struct uart_port *u = &tup->uport;
1643
1644 return uart_suspend_port(&tegra_uart_driver, u);
1645 }
1646
1647 static int tegra_uart_resume(struct device *dev)
1648 {
1649 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1650 struct uart_port *u = &tup->uport;
1651
1652 return uart_resume_port(&tegra_uart_driver, u);
1653 }
1654 #endif
1655
1656 static const struct dev_pm_ops tegra_uart_pm_ops = {
1657 SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
1658 };
1659
1660 static struct platform_driver tegra_uart_platform_driver = {
1661 .probe = tegra_uart_probe,
1662 .remove = tegra_uart_remove,
1663 .driver = {
1664 .name = "serial-tegra",
1665 .of_match_table = tegra_uart_of_match,
1666 .pm = &tegra_uart_pm_ops,
1667 },
1668 };
1669
1670 static int __init tegra_uart_init(void)
1671 {
1672 int ret;
1673 struct device_node *node;
1674 const struct of_device_id *match = NULL;
1675 const struct tegra_uart_chip_data *cdata = NULL;
1676
1677 node = of_find_matching_node(NULL, tegra_uart_of_match);
1678 if (node)
1679 match = of_match_node(tegra_uart_of_match, node);
1680 if (match)
1681 cdata = match->data;
1682 if (cdata)
1683 tegra_uart_driver.nr = cdata->uart_max_port;
1684
1685 ret = uart_register_driver(&tegra_uart_driver);
1686 if (ret < 0) {
1687 pr_err("Could not register %s driver\n",
1688 tegra_uart_driver.driver_name);
1689 return ret;
1690 }
1691
1692 ret = platform_driver_register(&tegra_uart_platform_driver);
1693 if (ret < 0) {
1694 pr_err("Uart platform driver register failed, e = %d\n", ret);
1695 uart_unregister_driver(&tegra_uart_driver);
1696 return ret;
1697 }
1698 return 0;
1699 }
1700
1701 static void __exit tegra_uart_exit(void)
1702 {
1703 pr_info("Unloading tegra uart driver\n");
1704 platform_driver_unregister(&tegra_uart_platform_driver);
1705 uart_unregister_driver(&tegra_uart_driver);
1706 }
1707
1708 module_init(tegra_uart_init);
1709 module_exit(tegra_uart_exit);
1710
1711 MODULE_ALIAS("platform:serial-tegra");
1712 MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1713 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1714 MODULE_LICENSE("GPL v2");
1715