// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
#define MAX_TX_BYTES		64
#define MAX_FIFO_SIZE		64
#define UART_RFL_16550A		0x21
#endif

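/*
 * TX DMA completion callback: advance the transmit circular buffer past the
 * bytes that were just sent, wake up writers once the buffer has room, and
 * try to queue the next transfer.  If that fails, re-enable the THRI
 * interrupt so transmission falls back to the PIO path.
 */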
static void __dma_tx_complete(void *param)
{
        struct uart_8250_port *p = param;
        struct uart_8250_dma *dma = p->dma;
        struct circ_buf *xmit = &p->port.state->xmit;
        unsigned long flags;
        int ret;

        dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
                                UART_XMIT_SIZE, DMA_TO_DEVICE);

        spin_lock_irqsave(&p->port.lock, flags);

        dma->tx_running = 0;

        xmit->tail += dma->tx_size;
        xmit->tail &= UART_XMIT_SIZE - 1;
        p->port.icount.tx += dma->tx_size;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&p->port);

        ret = serial8250_tx_dma(p);
        if (ret)
                serial8250_set_THRI(p);

        spin_unlock_irqrestore(&p->port.lock, flags);
}

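/*
 * Two RX completion variants follow.  The Rockchip build drains a cyclic
 * DMA ring buffer, using the transfer residue to work out how far the ring
 * has advanced since the last service (dma->rx_index).  The default build
 * handles a one-shot transfer: it pushes the completed chunk into the tty
 * layer and flips the buffer.
 */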
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)

static void __dma_rx_complete(void *param)
{
        struct uart_8250_port *p = param;
        struct uart_8250_dma *dma = p->dma;
        struct tty_port *tty_port = &p->port.state->port;
        struct dma_tx_state state;
        unsigned int count = 0, cur_index = 0;

        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
        cur_index = dma->rx_size - state.residue;

        if (cur_index == dma->rx_index)
                return;
        else if (cur_index > dma->rx_index)
                count = cur_index - dma->rx_index;
        else
                count = dma->rx_size - dma->rx_index;

        tty_insert_flip_string(tty_port, dma->rx_buf + dma->rx_index, count);

        if (cur_index < dma->rx_index) {
                tty_insert_flip_string(tty_port, dma->rx_buf, cur_index);
                count += cur_index;
        }

        p->port.icount.rx += count;
        dma->rx_index = cur_index;
}

#else

static void __dma_rx_complete(void *param)
{
        struct uart_8250_port *p = param;
        struct uart_8250_dma *dma = p->dma;
        struct tty_port *tty_port = &p->port.state->port;
        struct dma_tx_state state;
        int count;

        dma->rx_running = 0;
        dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

        count = dma->rx_size - state.residue;

        tty_insert_flip_string(tty_port, dma->rx_buf, count);
        p->port.icount.rx += count;

        tty_flip_buffer_push(tty_port);
}

#endif

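/*
 * Queue a TX DMA transfer for the data currently held in the circular
 * transmit buffer.  Returns 0 when a transfer was started or nothing needs
 * to be sent, and a negative error code otherwise; the error case marks
 * dma->tx_err so the interrupt-driven (THRI) path takes over.
 */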
int serial8250_tx_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;
        struct circ_buf *xmit = &p->port.state->xmit;
        struct dma_async_tx_descriptor *desc;
        struct uart_port *up = &p->port;
        int ret;

        if (dma->tx_running) {
                if (up->x_char) {
                        dmaengine_pause(dma->txchan);
                        uart_xchar_out(up, UART_TX);
                        dmaengine_resume(dma->txchan);
                }
                return 0;
        } else if (up->x_char) {
                uart_xchar_out(up, UART_TX);
        }

        if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
                /* We have been called from __dma_tx_complete() */
                serial8250_rpm_put_tx(p);
                return 0;
        }

        dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        if (dma->tx_size < MAX_TX_BYTES) {
                ret = -EBUSY;
                goto err;
        }
#endif
        desc = dmaengine_prep_slave_single(dma->txchan,
                                           dma->tx_addr + xmit->tail,
                                           dma->tx_size, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                ret = -EBUSY;
                goto err;
        }

        dma->tx_running = 1;
        desc->callback = __dma_tx_complete;
        desc->callback_param = p;

        dma->tx_cookie = dmaengine_submit(desc);

        dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
                                   UART_XMIT_SIZE, DMA_TO_DEVICE);

        dma_async_issue_pending(dma->txchan);
        if (dma->tx_err) {
                dma->tx_err = 0;
                serial8250_clear_THRI(p);
        }
        return 0;
err:
        dma->tx_err = 1;
        return ret;
}

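/*
 * RX setup.  The Rockchip build services a cyclic DMA ring on each RX
 * interrupt and drains any bytes still sitting in the FIFO by PIO, while
 * the default build arms a single DMA transfer that completes through
 * __dma_rx_complete().
 */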
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)

int serial8250_rx_dma(struct uart_8250_port *p)
{
        unsigned int rfl, i = 0, fcr = 0, cur_index = 0;
        unsigned char buf[MAX_FIFO_SIZE];
        struct uart_port *port = &p->port;
        struct tty_port *tty_port = &p->port.state->port;
        struct dma_tx_state state;
        struct uart_8250_dma *dma = p->dma;

        fcr = UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_10 | UART_FCR_R_TRIG_11;
        serial_port_out(port, UART_FCR, fcr);

        do {
                dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
                cur_index = dma->rx_size - state.residue;
        } while (cur_index % dma->rxconf.src_maxburst);

        rfl = serial_port_in(port, UART_RFL_16550A);
        while (i < rfl)
                buf[i++] = serial_port_in(port, UART_RX);

        __dma_rx_complete(p);

        tty_insert_flip_string(tty_port, buf, i);
        p->port.icount.rx += i;
        tty_flip_buffer_push(tty_port);

        if (fcr)
                serial_port_out(port, UART_FCR, p->fcr);
        return 0;
}

int serial8250_start_rx_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_cyclic(dma->rxchan, dma->rx_addr,
                                         dma->rx_size, dma->rx_size,
                                         DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
                                         DMA_CTRL_ACK);
        if (!desc)
                return -EBUSY;

        dma->rx_running = 1;
        desc->callback = NULL;
        desc->callback_param = NULL;

        dma->rx_cookie = dmaengine_submit(desc);
        dma_async_issue_pending(dma->rxchan);
        dma->rx_index = 0;
        return 0;
}

#else

int serial8250_rx_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;
        struct dma_async_tx_descriptor *desc;

        if (dma->rx_running)
                return 0;

        desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
                                           dma->rx_size, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EBUSY;

        dma->rx_running = 1;
        desc->callback = __dma_rx_complete;
        desc->callback_param = p;

        dma->rx_cookie = dmaengine_submit(desc);

        dma_async_issue_pending(dma->rxchan);

        return 0;
}

#endif

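/*
 * Push whatever the RX DMA engine has received so far into the tty layer
 * and cancel the outstanding transfer.
 */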
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;

        if (dma->rx_running) {
                dmaengine_pause(dma->rxchan);
                __dma_rx_complete(p);
                dmaengine_terminate_async(dma->rxchan);
        }
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

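/*
 * Request the RX and TX DMA channels and set up their slave configuration
 * and buffers.  The RX channel is mandatory; the TX channel is optional and
 * the port simply keeps transmitting by PIO when it is missing.
 */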
int serial8250_request_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;
        phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
                                  dma->rx_dma_addr : p->port.mapbase;
        phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
                                  dma->tx_dma_addr : p->port.mapbase;
        dma_cap_mask_t mask;
        struct dma_slave_caps caps;
        int ret;

        /* Default slave configuration parameters */
        dma->rxconf.direction = DMA_DEV_TO_MEM;
        dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->rxconf.src_addr = rx_dma_addr + UART_RX;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        if ((p->port.fifosize / 4) < 16)
                dma->rxconf.src_maxburst = p->port.fifosize / 4;
        else
                dma->rxconf.src_maxburst = 16;
#endif

        dma->txconf.direction = DMA_MEM_TO_DEV;
        dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma->txconf.dst_addr = tx_dma_addr + UART_TX;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        dma->txconf.dst_maxburst = 16;
#endif
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Get a channel for RX */
        dma->rxchan = dma_request_slave_channel_compat(mask,
                                                       dma->fn, dma->rx_param,
                                                       p->port.dev, "rx");
        if (!dma->rxchan)
                return -ENODEV;

        /* 8250 rx dma requires dmaengine driver to support pause/terminate */
        ret = dma_get_slave_caps(dma->rxchan, &caps);
        if (ret)
                goto release_rx;
        if (!caps.cmd_pause || !caps.cmd_terminate ||
            caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
                ret = -EINVAL;
                goto release_rx;
        }

        dmaengine_slave_config(dma->rxchan, &dma->rxconf);

        /* RX buffer */
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        if (!dma->rx_size)
                dma->rx_size = PAGE_SIZE * 2;
#else
        if (!dma->rx_size)
                dma->rx_size = PAGE_SIZE;
#endif

        dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                         &dma->rx_addr, GFP_KERNEL);
        if (!dma->rx_buf) {
                ret = -ENOMEM;
                goto release_rx;
        }

        /* Get a channel for TX */
        dma->txchan = dma_request_slave_channel_compat(mask,
                                                       dma->fn, dma->tx_param,
                                                       p->port.dev, "tx");
        if (dma->txchan) {
                dmaengine_slave_config(dma->txchan, &dma->txconf);

                /* TX buffer */
                dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                              p->port.state->xmit.buf,
                                              UART_XMIT_SIZE,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
                        dma_free_coherent(dma->rxchan->device->dev,
                                          dma->rx_size, dma->rx_buf,
                                          dma->rx_addr);
                        dma_release_channel(dma->txchan);
                        dma->txchan = NULL;
                }

                dev_info_ratelimited(p->port.dev, "got rx and tx dma channels\n");
        } else {
                dev_info_ratelimited(p->port.dev, "got rx dma channels only\n");
        }

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        /* Start DMA for RX */
        serial8250_start_rx_dma(p);
#endif
        return 0;
release_rx:
        dma_release_channel(dma->rxchan);
        return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

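/*
 * Undo serial8250_request_dma(): stop any outstanding transfers and free
 * the channels and buffers.
 */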
void serial8250_release_dma(struct uart_8250_port *p)
{
        struct uart_8250_dma *dma = p->dma;

        if (!dma)
                return;

        /* Release RX resources */
        dmaengine_terminate_sync(dma->rxchan);
        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
                          dma->rx_addr);
        dma_release_channel(dma->rxchan);
        dma->rxchan = NULL;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
        dma->rx_running = 0;
#endif
        /* Release TX resources */
        if (dma->txchan) {
                dmaengine_terminate_all(dma->txchan);
                dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
                                 UART_XMIT_SIZE, DMA_TO_DEVICE);
                dma_release_channel(dma->txchan);
                dma->txchan = NULL;
                dma->tx_running = 0;
        }
        dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);