blob: c2be22c3b7d1bd20f10916fca7128dc8c1c7187e [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
David Brazdil0f672f62019-12-10 10:32:29 +00007 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12#include <linux/clk.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmapool.h>
18#include <linux/err.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/pagemap.h>
25#include <linux/platform_device.h>
26#include <linux/reset.h>
27#include <linux/serial.h>
28#include <linux/serial_8250.h>
29#include <linux/serial_core.h>
30#include <linux/serial_reg.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/termios.h>
34#include <linux/tty.h>
35#include <linux/tty_flip.h>
36
#define TEGRA_UART_TYPE				"TEGRA_UART"
#define TX_EMPTY_STATUS				(UART_LSR_TEMT | UART_LSR_THRE)
/* Bytes needed to reach the next 4-byte boundary (TX DMA alignment). */
#define BYTES_TO_ALIGN(x)			((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE		4096
/* Tegra-specific LSR/IER/MCR bits not covered by serial_reg.h */
#define TEGRA_UART_LSR_TXFIFO_FULL		0x100
#define TEGRA_UART_IER_EORD			0x20
#define TEGRA_UART_MCR_RTS_EN			0x40
#define TEGRA_UART_MCR_CTS_EN			0x20
#define TEGRA_UART_LSR_ANY			(UART_LSR_OE | UART_LSR_BI | \
						UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR			0x08
#define TEGRA_UART_SIR_ENABLED			0x80

#define TEGRA_UART_TX_PIO			1
#define TEGRA_UART_TX_DMA			2
/* Chunks smaller than this are sent by PIO instead of DMA. */
#define TEGRA_UART_MIN_DMA			16
#define TEGRA_UART_FIFO_SIZE			32

/*
 * Tx fifo trigger level setting in tegra uart is in
 * reverse way then conventional uart.
 */
#define TEGRA_UART_TX_TRIG_16B			0x00
#define TEGRA_UART_TX_TRIG_8B			0x10
#define TEGRA_UART_TX_TRIG_4B			0x20
#define TEGRA_UART_TX_TRIG_1B			0x30

#define TEGRA_UART_MAXIMUM			8

/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD			115200
#define TEGRA_UART_DEFAULT_LSR			UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO				1
#define TEGRA_TX_DMA				2

/* IIR bit that reflects whether FIFO mode is actually enabled. */
#define TEGRA_UART_FCR_IIR_FIFO_EN		0x40
/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source support the clock divider.
 * @fifo_mode_enable_status: FIFO-enable state can be read back via IIR
 *			(see TEGRA_UART_FCR_IIR_FIFO_EN).
 * @uart_max_port: number of UART ports on this SoC.
 * @max_dma_burst_bytes: largest DMA burst size, in bytes.
 * @error_tolerance_low_range: lower bound of allowed baud deviation (percent).
 * @error_tolerance_high_range: upper bound of allowed baud deviation (percent).
 */
struct tegra_uart_chip_data {
	bool	tx_fifo_full_status;
	bool	allow_txfifo_reset_fifo_mode;
	bool	support_clk_src_div;
	bool	fifo_mode_enable_status;
	int	uart_max_port;
	int	max_dma_burst_bytes;
	int	error_tolerance_low_range;
	int	error_tolerance_high_range;
};
95
/*
 * One baud-rate window with its clock-rate adjustment.  @tolerance is a
 * signed correction in 1/100 of a percent (applied as rate*tolerance/10000
 * in tegra_get_tolerance_rate()).
 */
struct tegra_baud_tolerance {
	u32 lower_range_baud;
	u32 upper_range_baud;
	s32 tolerance;
};
101
/* Per-port driver state, embedding the generic uart_port. */
struct tegra_uart_port {
	struct uart_port			uport;
	const struct tegra_uart_chip_data	*cdata;

	struct clk				*uart_clk;
	struct reset_control			*rst;
	unsigned int				current_baud; /* 0 = unconfigured */

	/* Register shadow (controller registers are write-mostly) */
	unsigned long				fcr_shadow;
	unsigned long				mcr_shadow;
	unsigned long				lcr_shadow;
	unsigned long				ier_shadow;
	bool					rts_active;

	int					tx_in_progress; /* 0, TX_PIO or TX_DMA */
	unsigned int				tx_bytes;

	bool					enable_modem_interrupt;

	bool					rx_timeout;
	int					rx_in_progress;
	int					symb_bit; /* bits per symbol, for timing waits */

	/* DMA channels, bounce buffers and in-flight transfer bookkeeping */
	struct dma_chan				*rx_dma_chan;
	struct dma_chan				*tx_dma_chan;
	dma_addr_t				rx_dma_buf_phys;
	dma_addr_t				tx_dma_buf_phys;
	unsigned char				*rx_dma_buf_virt;
	unsigned char				*tx_dma_buf_virt;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	struct dma_async_tx_descriptor		*rx_dma_desc;
	dma_cookie_t				tx_cookie;
	dma_cookie_t				rx_cookie;
	unsigned int				tx_bytes_requested;
	unsigned int				rx_bytes_requested;

	/* Baud-rate tolerance table parsed from DT */
	struct tegra_baud_tolerance		*baud_tolerance;
	int					n_adjustable_baud_rates;
	int					required_rate;
	int					configured_rate;
	bool					use_rx_pio;
	bool					use_tx_pio;
	bool					rx_dma_active;
};
146
147static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
148static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
David Brazdil0f672f62019-12-10 10:32:29 +0000149static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
150 bool dma_to_memory);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000151
152static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
153 unsigned long reg)
154{
155 return readl(tup->uport.membase + (reg << tup->uport.regshift));
156}
157
158static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
159 unsigned long reg)
160{
161 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
162}
163
164static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
165{
166 return container_of(u, struct tegra_uart_port, uport);
167}
168
169static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
170{
171 struct tegra_uart_port *tup = to_tegra_uport(u);
172
173 /*
174 * RI - Ring detector is active
175 * CD/DCD/CAR - Carrier detect is always active. For some reason
176 * linux has different names for carrier detect.
177 * DSR - Data Set ready is active as the hardware doesn't support it.
178 * Don't know if the linux support this yet?
179 * CTS - Clear to send. Always set to active, as the hardware handles
180 * CTS automatically.
181 */
182 if (tup->enable_modem_interrupt)
183 return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
184 return TIOCM_CTS;
185}
186
187static void set_rts(struct tegra_uart_port *tup, bool active)
188{
189 unsigned long mcr;
190
191 mcr = tup->mcr_shadow;
192 if (active)
193 mcr |= TEGRA_UART_MCR_RTS_EN;
194 else
195 mcr &= ~TEGRA_UART_MCR_RTS_EN;
196 if (mcr != tup->mcr_shadow) {
197 tegra_uart_write(tup, mcr, UART_MCR);
198 tup->mcr_shadow = mcr;
199 }
200}
201
202static void set_dtr(struct tegra_uart_port *tup, bool active)
203{
204 unsigned long mcr;
205
206 mcr = tup->mcr_shadow;
207 if (active)
208 mcr |= UART_MCR_DTR;
209 else
210 mcr &= ~UART_MCR_DTR;
211 if (mcr != tup->mcr_shadow) {
212 tegra_uart_write(tup, mcr, UART_MCR);
213 tup->mcr_shadow = mcr;
214 }
215}
216
David Brazdil0f672f62019-12-10 10:32:29 +0000217static void set_loopbk(struct tegra_uart_port *tup, bool active)
218{
219 unsigned long mcr = tup->mcr_shadow;
220
221 if (active)
222 mcr |= UART_MCR_LOOP;
223 else
224 mcr &= ~UART_MCR_LOOP;
225
226 if (mcr != tup->mcr_shadow) {
227 tegra_uart_write(tup, mcr, UART_MCR);
228 tup->mcr_shadow = mcr;
229 }
230}
231
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000232static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
233{
234 struct tegra_uart_port *tup = to_tegra_uport(u);
David Brazdil0f672f62019-12-10 10:32:29 +0000235 int enable;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000236
237 tup->rts_active = !!(mctrl & TIOCM_RTS);
238 set_rts(tup, tup->rts_active);
239
David Brazdil0f672f62019-12-10 10:32:29 +0000240 enable = !!(mctrl & TIOCM_DTR);
241 set_dtr(tup, enable);
242
243 enable = !!(mctrl & TIOCM_LOOP);
244 set_loopbk(tup, enable);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000245}
246
247static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
248{
249 struct tegra_uart_port *tup = to_tegra_uport(u);
250 unsigned long lcr;
251
252 lcr = tup->lcr_shadow;
253 if (break_ctl)
254 lcr |= UART_LCR_SBC;
255 else
256 lcr &= ~UART_LCR_SBC;
257 tegra_uart_write(tup, lcr, UART_LCR);
258 tup->lcr_shadow = lcr;
259}
260
261/**
262 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
263 *
264 * @tup: Tegra serial port data structure.
265 * @cycles: Number of clock periods to wait.
266 *
267 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
268 * clock speed is 16X the current baud rate.
269 */
270static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
271 unsigned int cycles)
272{
273 if (tup->current_baud)
274 udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
275}
276
277/* Wait for a symbol-time. */
278static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
279 unsigned int syms)
280{
281 if (tup->current_baud)
282 udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
283 tup->current_baud));
284}
285
David Brazdil0f672f62019-12-10 10:32:29 +0000286static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
287{
288 unsigned long iir;
289 unsigned int tmout = 100;
290
291 do {
292 iir = tegra_uart_read(tup, UART_IIR);
293 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
294 return 0;
295 udelay(1);
296 } while (--tmout);
297
298 return -ETIMEDOUT;
299}
300
/*
 * Clear the RX and/or TX FIFO (selected by @fcr_bits) and wait until the
 * flush has actually completed.  If RTS flow control is active it is
 * dropped while the FIFOs are being cleared.
 */
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		/* FIFO clear can be issued while FIFO mode stays enabled. */
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		/* Tegra30-style: FIFO mode must be disabled around the clear,
		 * then re-enabled; the write order here is hardware-mandated. */
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	/* Wait (bounded) until TX is empty and no stale RX data remains. */
	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	if (tup->rts_active)
		set_rts(tup, true);
}
344
345static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
346 unsigned int baud, long rate)
347{
348 int i;
349
350 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
351 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
352 baud <= tup->baud_tolerance[i].upper_range_baud)
353 return (rate + (rate *
354 tup->baud_tolerance[i].tolerance) / 10000);
355 }
356
357 return rate;
358}
359
360static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
361{
362 long diff;
363
364 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
365 / tup->required_rate;
366 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
367 diff > (tup->cdata->error_tolerance_high_range * 100)) {
368 dev_err(tup->uport.dev,
369 "configured baud rate is out of range by %ld", diff);
370 return -EIO;
371 }
372
373 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000374}
375
/*
 * Program the divisor (and, where the SoC supports it, the UART source
 * clock) for @baud.  Returns 0 on success or a negative errno when the
 * clock cannot be set or the achieved rate is out of tolerance.
 */
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		/* Run the source clock at exactly 16x baud, divisor = 1. */
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		/* Fixed source clock: derive the conventional 16x divisor. */
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;		/* expose the divisor latch */
	tegra_uart_write(tup, lcr, UART_LCR);

	/* With DLAB set, UART_TX aliases DLL and UART_IER aliases DLM. */
	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
431
/*
 * Decode the error bits of @lsr, bump the matching error counter and queue
 * an error-flagged character to the tty layer.  Returns the tty flag
 * (TTY_NORMAL when no error was present).
 */
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
				       unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_dbg(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_dbg(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			/* Framing error */
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_dbg(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			/*
			 * Break error
			 * If FIFO read error without any data, reset Rx FIFO
			 */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
			/* Ignored breaks are not pushed to the tty layer. */
			if (tup->uport.ignore_status_mask & UART_LSR_BI)
				return TTY_BREAK;
			flag = TTY_BREAK;
			tup->uport.icount.brk++;
			dev_dbg(tup->uport.dev, "Got Break\n");
		}
		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
	}

	return flag;
}
470
/* No extra resources to claim beyond what probe already mapped. */
static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}
475
static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}
480
/*
 * Push up to @max_bytes characters from the circular TX buffer into the
 * hardware FIFO.  On SoCs that expose a TX-FIFO-full status bit, stop
 * early once the FIFO fills.  Caller guarantees the buffer holds at least
 * @max_bytes characters (enforced by the BUG_ON).
 */
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}
498
499static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
500 unsigned int bytes)
501{
502 if (bytes > TEGRA_UART_MIN_DMA)
503 bytes = TEGRA_UART_MIN_DMA;
504
505 tup->tx_in_progress = TEGRA_UART_TX_PIO;
506 tup->tx_bytes = bytes;
507 tup->ier_shadow |= UART_IER_THRI;
508 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
509}
510
/*
 * TX DMA completion callback: advance the circular buffer by the number of
 * bytes actually transferred (requested minus residue), wake writers if
 * room opened up, and queue the next chunk.
 */
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
530
/*
 * Start a TX DMA transfer from the tail of the circular buffer.  The
 * length is rounded down to a multiple of 16 bytes (DMA burst size).
 * Returns 0 on success or -EIO when no descriptor is available.
 */
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
				   unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;

	/* Hand the CPU-written bytes over to the device before the DMA. */
	dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
				   tup->tx_bytes, DMA_TO_DEVICE);

	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
559
/*
 * Queue the next chunk of the circular TX buffer.  Small chunks (below the
 * DMA threshold) and the unaligned head of a larger chunk go out via PIO;
 * 4-byte-aligned bulk data is handed to the DMA engine.
 */
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	/* Nothing can be sent before the baud rate is programmed. */
	if (!tup->current_baud)
		return;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		/* Drain the unaligned head via PIO so DMA starts aligned. */
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}
581
582/* Called by serial core driver with u->lock taken. */
583static void tegra_uart_start_tx(struct uart_port *u)
584{
585 struct tegra_uart_port *tup = to_tegra_uport(u);
586 struct circ_buf *xmit = &u->state->xmit;
587
588 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
589 tegra_uart_start_next_tx(tup);
590}
591
592static unsigned int tegra_uart_tx_empty(struct uart_port *u)
593{
594 struct tegra_uart_port *tup = to_tegra_uport(u);
595 unsigned int ret = 0;
596 unsigned long flags;
597
598 spin_lock_irqsave(&u->lock, flags);
599 if (!tup->tx_in_progress) {
600 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
601 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
602 ret = TIOCSER_TEMT;
603 }
604 spin_unlock_irqrestore(&u->lock, flags);
605 return ret;
606}
607
/*
 * Called by the serial core with the port lock held: abort an in-flight TX
 * DMA and account for the bytes that were already sent.  PIO transmissions
 * stop on their own once the FIFO drains, so nothing is done for them.
 */
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
625
/*
 * THR-empty interrupt handler: push the pending PIO chunk into the FIFO,
 * wake up writers if room opened, then try to queue more data.
 */
static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}
636
/*
 * Drain the RX FIFO in PIO mode, pushing characters into the tty flip
 * buffer until the data-ready bit clears.  Error characters are queued by
 * tegra_uart_decode_rx_error() itself.
 */
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
				     struct tty_port *port)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		if (flag != TTY_NORMAL)
			continue;

		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		/* Sysrq characters are consumed by the core, not queued. */
		if (uart_handle_sysrq_char(&tup->uport, ch))
			continue;

		if (tup->uport.ignore_status_mask & UART_LSR_DR)
			continue;

		tty_insert_flip_char(port, ch, flag);
	} while (1);
}
665
/*
 * Copy @count DMA-received bytes from the bounce buffer into the tty flip
 * buffer.  Caller must ensure the DMA into the buffer is stopped.
 */
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *port,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;

	if (tup->uport.ignore_status_mask & UART_LSR_DR)
		return;

	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(port,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	/*
	 * NOTE(review): DMA_TO_DEVICE looks wrong for a buffer the device
	 * writes into -- the DMA-API requires the direction used at map
	 * time, normally DMA_FROM_DEVICE for an RX buffer.  The mapping
	 * site is not in this chunk; confirm against the dma_map_single()
	 * call before changing it.
	 */
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   count, DMA_TO_DEVICE);
}
692
693static void do_handle_rx_pio(struct tegra_uart_port *tup)
694{
695 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
696 struct tty_port *port = &tup->uport.state->port;
697
698 tegra_uart_handle_rx_pio(tup, port);
699 if (tty) {
700 tty_flip_buffer_push(port);
701 tty_kref_put(tty);
702 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000703}
704
/*
 * Push whatever the RX DMA wrote so far (requested minus @residue bytes)
 * to the tty layer, then drain any stragglers left in the FIFO via PIO.
 * Must only be called once the DMA transfer has stopped.
 */
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	do_handle_rx_pio(tup);
}
719
/*
 * RX DMA completion callback: the bounce buffer filled up.  Push it to the
 * tty layer and immediately re-arm the DMA, pausing the remote sender via
 * RTS while the buffer is drained.
 */
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	if (status == DMA_IN_PROGRESS) {
		/* Stale callback: the transfer was already restarted. */
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tup->rx_dma_active = false;
	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}
752
/*
 * Stop an active RX DMA transfer and push the partially-filled buffer to
 * the tty layer.  Falls back to a PIO drain when no DMA is in flight.
 */
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	if (!tup->rx_dma_active) {
		do_handle_rx_pio(tup);
		return;
	}

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	tegra_uart_rx_buffer_push(tup, state.residue);
	tup->rx_dma_active = false;
}
768
/*
 * Terminate RX DMA and flush its data, pausing the remote sender via RTS
 * for the duration when flow control is active.
 */
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tegra_uart_terminate_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
780
/*
 * Arm an RX DMA transfer covering the whole bounce buffer; a no-op when a
 * transfer is already active.  Returns 0 on success or -EIO when no DMA
 * descriptor could be obtained.
 */
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	if (tup->rx_dma_active)
		return 0;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	/* Mark active before submit: the callback clears it. */
	tup->rx_dma_active = true;
	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
804
/*
 * Modem-status interrupt: read MSR and forward any delta bits to the
 * serial core's ring/DSR/DCD/CTS accounting.
 */
static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;
	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
825
/*
 * Top-level interrupt handler.  Loops on UART_IIR until no interrupt is
 * pending.  In DMA mode, RX interrupts are only masked inside the loop;
 * the actual DMA drain/re-arm is deferred to the moment IIR goes idle so
 * the FIFO keeps filling while the previous buffer is processed.
 */
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_start = false;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (!tup->use_rx_pio && is_rx_int) {
				/* EORD/timeout fired above: drain the DMA
				 * buffer and re-enable RX interrupts. */
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD | UART_IER_RDI);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			} else if (is_rx_start) {
				tegra_uart_start_rx_dma(tup);
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
			if (!tup->use_rx_pio) {
				is_rx_int = tup->rx_in_progress;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
				break;
			}
			fallthrough;
		case 2: /* Receive */
			if (!tup->use_rx_pio) {
				/* First data after idle: RX DMA is started
				 * once IIR goes quiet (see above). */
				is_rx_start = tup->rx_in_progress;
				tup->ier_shadow &= ~UART_IER_RDI;
				tegra_uart_write(tup, tup->ier_shadow,
						UART_IER);
			} else {
				do_handle_rx_pio(tup);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
902
/*
 * Called by the serial core with the port lock held: stop reception, mask
 * all RX interrupts and flush whatever was already received (via DMA or
 * PIO depending on the configured mode).
 */
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_port *port = &tup->uport.state->port;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_terminate_rx_dma(tup);
	else
		tegra_uart_handle_rx_pio(tup, port);
}
929
/*
 * Quiesce and power down the controller: wait (bounded) for the TX FIFO
 * to drain, reset both FIFOs, release DMA resources and gate the clock.
 */
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	/* One character time in microseconds: 10 bit-times at current baud */
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	/* Budget for draining a completely full TX FIFO */
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		/* If CTS flow control is active and the peer is holding us
		 * off, warn that the drain below may take a while. */
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			/* Poll in steps of at most 100 us until the drain
			 * budget is exhausted. */
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				/* Budget spent: if CTS flow control is still
				 * asserted the remote never drained us. */
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
					(msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/* Release DMA channels/buffers for each direction that used DMA */
	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);

	clk_disable_unprepare(tup->uart_clk);
}
985
/*
 * Bring the controller out of reset and program it to a known state:
 * FIFO trigger levels, a default line configuration (115200 8N1) and
 * the RX interrupt set. Returns 0 on success or a negative errno.
 */
static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	/* Start from clean shadow copies of the write-only registers */
	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status.*/
	reset_control_assert(tup->rst);
	udelay(10);
	reset_control_deassert(tup->rst);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many number of
	 * bytes are received, for the remaining bytes the receive timeout
	 * interrupt is received. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;

	/* RX trigger: highest watermark for PIO, else match the per-SoC
	 * DMA burst size (8 bytes -> TRIG_10, otherwise TRIG_01). */
	if (tup->use_rx_pio) {
		tup->fcr_shadow |= UART_FCR_R_TRIG_11;
	} else {
		if (tup->cdata->max_dma_burst_bytes == 8)
			tup->fcr_shadow |= UART_FCR_R_TRIG_10;
		else
			tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	}

	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/* Newer chips report FIFO-mode readiness; older ones need a fixed
	 * delay (see below) instead. */
	if (tup->cdata->fifo_mode_enable_status) {
		ret = tegra_uart_wait_fifo_mode_enabled(tup);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"Failed to enable FIFO mode: %d\n", ret);
			return ret;
		}
	} else {
		/*
		 * For all tegra devices (up to t210), there is a hardware
		 * issue that requires software to wait for 3 UART clock
		 * periods after enabling the TX fifo, otherwise data could
		 * be lost.
		 */
		tegra_uart_wait_cycle_time(tup, 3);
	}

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return ret;
	}
	/* In DMA mode route RX FIFO requests to the DMA engine */
	if (!tup->use_rx_pio) {
		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	} else {
		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
	 * the DATA is sitting in the FIFO and couldn't be transferred to the
	 * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
	 * triggered when there is a pause of the incoming data stream for 4
	 * characters long.
	 *
	 * For pauses in the data which is not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
	 * then the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;

	/*
	 * If using DMA mode, enable EORD interrupt to notify about RX
	 * completion.
	 */
	if (!tup->use_rx_pio)
		tup->ier_shadow |= TEGRA_UART_IER_EORD;

	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}
1103
1104static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1105 bool dma_to_memory)
1106{
1107 if (dma_to_memory) {
1108 dmaengine_terminate_all(tup->rx_dma_chan);
1109 dma_release_channel(tup->rx_dma_chan);
1110 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1111 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1112 tup->rx_dma_chan = NULL;
1113 tup->rx_dma_buf_phys = 0;
1114 tup->rx_dma_buf_virt = NULL;
1115 } else {
1116 dmaengine_terminate_all(tup->tx_dma_chan);
1117 dma_release_channel(tup->tx_dma_chan);
1118 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1119 UART_XMIT_SIZE, DMA_TO_DEVICE);
1120 tup->tx_dma_chan = NULL;
1121 tup->tx_dma_buf_phys = 0;
1122 tup->tx_dma_buf_virt = NULL;
1123 }
1124}
1125
1126static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1127 bool dma_to_memory)
1128{
1129 struct dma_chan *dma_chan;
1130 unsigned char *dma_buf;
1131 dma_addr_t dma_phys;
1132 int ret;
1133 struct dma_slave_config dma_sconfig;
1134
Olivier Deprez157378f2022-04-04 15:47:50 +02001135 dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001136 if (IS_ERR(dma_chan)) {
1137 ret = PTR_ERR(dma_chan);
1138 dev_err(tup->uport.dev,
1139 "DMA channel alloc failed: %d\n", ret);
1140 return ret;
1141 }
1142
1143 if (dma_to_memory) {
1144 dma_buf = dma_alloc_coherent(tup->uport.dev,
1145 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1146 &dma_phys, GFP_KERNEL);
1147 if (!dma_buf) {
1148 dev_err(tup->uport.dev,
1149 "Not able to allocate the dma buffer\n");
1150 dma_release_channel(dma_chan);
1151 return -ENOMEM;
1152 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001153 dma_sync_single_for_device(tup->uport.dev, dma_phys,
1154 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1155 DMA_TO_DEVICE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001156 dma_sconfig.src_addr = tup->uport.mapbase;
1157 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
David Brazdil0f672f62019-12-10 10:32:29 +00001158 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001159 tup->rx_dma_chan = dma_chan;
1160 tup->rx_dma_buf_virt = dma_buf;
1161 tup->rx_dma_buf_phys = dma_phys;
1162 } else {
1163 dma_phys = dma_map_single(tup->uport.dev,
1164 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1165 DMA_TO_DEVICE);
1166 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1167 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1168 dma_release_channel(dma_chan);
1169 return -ENOMEM;
1170 }
1171 dma_buf = tup->uport.state->xmit.buf;
1172 dma_sconfig.dst_addr = tup->uport.mapbase;
1173 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1174 dma_sconfig.dst_maxburst = 16;
1175 tup->tx_dma_chan = dma_chan;
1176 tup->tx_dma_buf_virt = dma_buf;
1177 tup->tx_dma_buf_phys = dma_phys;
1178 }
1179
1180 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1181 if (ret < 0) {
1182 dev_err(tup->uport.dev,
1183 "Dma slave config failed, err = %d\n", ret);
1184 tegra_uart_dma_channel_free(tup, dma_to_memory);
1185 return ret;
1186 }
1187
1188 return 0;
1189}
1190
/*
 * uart_ops.startup: allocate per-direction DMA resources (unless PIO
 * mode was selected from DT), initialize the hardware and install the
 * interrupt handler. Unwinds in reverse order on failure.
 */
static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	if (!tup->use_tx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, false);
		if (ret < 0) {
			dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
				ret);
			return ret;
		}
	}

	if (!tup->use_rx_pio) {
		ret = tegra_uart_dma_channel_allocate(tup, true);
		if (ret < 0) {
			dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
				ret);
			goto fail_rx_dma;
		}
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	/*
	 * NOTE(review): if request_irq() fails, the unwind frees the DMA
	 * channels but does not undo hw_init (the clock enabled by
	 * clk_prepare_enable() stays on) — confirm whether a hw_deinit is
	 * needed on this path.
	 */
	ret = request_irq(u->irq, tegra_uart_isr, 0,
				dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);
	return ret;
}
1236
1237/*
1238 * Flush any TX data submitted for DMA and PIO. Called when the
1239 * TX circular buffer is reset.
1240 */
1241static void tegra_uart_flush_buffer(struct uart_port *u)
1242{
1243 struct tegra_uart_port *tup = to_tegra_uport(u);
1244
1245 tup->tx_bytes = 0;
1246 if (tup->tx_dma_chan)
1247 dmaengine_terminate_all(tup->tx_dma_chan);
1248}
1249
/*
 * uart_ops.shutdown: quiesce the hardware (tegra_uart_hw_deinit also
 * releases DMA resources and gates the clock), then free the IRQ.
 */
static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);
	free_irq(u->irq, tup);
}
1257
1258static void tegra_uart_enable_ms(struct uart_port *u)
1259{
1260 struct tegra_uart_port *tup = to_tegra_uport(u);
1261
1262 if (tup->enable_modem_interrupt) {
1263 tup->ier_shadow |= UART_IER_MSI;
1264 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1265 }
1266}
1267
/*
 * uart_ops.set_termios: apply parity, word length, stop bits, baud rate
 * and flow control from @termios. Interrupts are masked for the duration
 * of the reconfiguration and restored at the end; the port lock is
 * dropped around tegra_set_baudrate() since it can sleep on clk calls.
 */
static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	/* Running count of bit-times per symbol: start bit + data + parity
	 * + stop bit(s); cached in tup->symb_bit for timing helpers. */
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	/* Divisor field is 15 bits when clock-source division is supported */
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
	int ret;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	/* Word length */
	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	/* Drop the lock: tegra_set_baudrate() may sleep */
	spin_unlock_irqrestore(&u->lock, flags);
	ret = tegra_set_baudrate(tup, baud);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return;
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);

	tup->uport.ignore_status_mask = 0;
	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		tup->uport.ignore_status_mask |= UART_LSR_DR;
	if (termios->c_iflag & IGNBRK)
		tup->uport.ignore_status_mask |= UART_LSR_BI;

	spin_unlock_irqrestore(&u->lock, flags);
}
1394
/* uart_ops.type: fixed identification string shown e.g. in /proc/tty. */
static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}
1399
/* serial core callbacks implemented by this driver */
static const struct uart_ops tegra_uart_ops = {
	.tx_empty = tegra_uart_tx_empty,
	.set_mctrl = tegra_uart_set_mctrl,
	.get_mctrl = tegra_uart_get_mctrl,
	.stop_tx = tegra_uart_stop_tx,
	.start_tx = tegra_uart_start_tx,
	.stop_rx = tegra_uart_stop_rx,
	.flush_buffer = tegra_uart_flush_buffer,
	.enable_ms = tegra_uart_enable_ms,
	.break_ctl = tegra_uart_break_ctl,
	.startup = tegra_uart_startup,
	.shutdown = tegra_uart_shutdown,
	.set_termios = tegra_uart_set_termios,
	.type = tegra_uart_type,
	.request_port = tegra_uart_request_port,
	.release_port = tegra_uart_release_port,
};
1417
/*
 * High-speed UART driver instance, exposed as /dev/ttyTHS*. The port
 * count (.nr) may be raised per-SoC in tegra_uart_init(); no console
 * support is provided (.cons = NULL).
 */
static struct uart_driver tegra_uart_driver = {
	.owner = THIS_MODULE,
	.driver_name = "tegra_hsuart",
	.dev_name = "ttyTHS",
	.cons = NULL,
	.nr = TEGRA_UART_MAXIMUM,
};
1425
1426static int tegra_uart_parse_dt(struct platform_device *pdev,
1427 struct tegra_uart_port *tup)
1428{
1429 struct device_node *np = pdev->dev.of_node;
1430 int port;
David Brazdil0f672f62019-12-10 10:32:29 +00001431 int ret;
1432 int index;
1433 u32 pval;
1434 int count;
1435 int n_entries;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001436
1437 port = of_alias_get_id(np, "serial");
1438 if (port < 0) {
1439 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1440 return port;
1441 }
1442 tup->uport.line = port;
1443
1444 tup->enable_modem_interrupt = of_property_read_bool(np,
1445 "nvidia,enable-modem-interrupt");
David Brazdil0f672f62019-12-10 10:32:29 +00001446
1447 index = of_property_match_string(np, "dma-names", "rx");
1448 if (index < 0) {
1449 tup->use_rx_pio = true;
1450 dev_info(&pdev->dev, "RX in PIO mode\n");
1451 }
1452 index = of_property_match_string(np, "dma-names", "tx");
1453 if (index < 0) {
1454 tup->use_tx_pio = true;
1455 dev_info(&pdev->dev, "TX in PIO mode\n");
1456 }
1457
1458 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1459 if (n_entries > 0) {
1460 tup->n_adjustable_baud_rates = n_entries / 3;
1461 tup->baud_tolerance =
1462 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1463 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1464 if (!tup->baud_tolerance)
1465 return -ENOMEM;
1466 for (count = 0, index = 0; count < n_entries; count += 3,
1467 index++) {
1468 ret =
1469 of_property_read_u32_index(np,
1470 "nvidia,adjust-baud-rates",
1471 count, &pval);
1472 if (!ret)
1473 tup->baud_tolerance[index].lower_range_baud =
1474 pval;
1475 ret =
1476 of_property_read_u32_index(np,
1477 "nvidia,adjust-baud-rates",
1478 count + 1, &pval);
1479 if (!ret)
1480 tup->baud_tolerance[index].upper_range_baud =
1481 pval;
1482 ret =
1483 of_property_read_u32_index(np,
1484 "nvidia,adjust-baud-rates",
1485 count + 2, &pval);
1486 if (!ret)
1487 tup->baud_tolerance[index].tolerance =
1488 (s32)pval;
1489 }
1490 } else {
1491 tup->n_adjustable_baud_rates = 0;
1492 }
1493
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001494 return 0;
1495}
1496
/*
 * Per-SoC configuration, selected through the OF match table below.
 * max_dma_burst_bytes must agree with the RX FIFO trigger chosen in
 * tegra_uart_hw_init(). error_tolerance_{low,high}_range bound the
 * acceptable baud-rate deviation (presumably in percent — confirm
 * against tegra_check_rate_in_range()/baud-rate adjustment code).
 */
static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status = false,
	.allow_txfifo_reset_fifo_mode = true,
	.support_clk_src_div = false,
	.fifo_mode_enable_status = false,
	.uart_max_port = 5,
	.max_dma_burst_bytes = 4,
	.error_tolerance_low_range = -4,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = false,
	.uart_max_port = 5,
	.max_dma_burst_bytes = 4,
	.error_tolerance_low_range = -4,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra186_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = true,
	.uart_max_port = 8,
	.max_dma_burst_bytes = 8,
	.error_tolerance_low_range = 0,
	.error_tolerance_high_range = 4,
};

static struct tegra_uart_chip_data tegra194_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
	.fifo_mode_enable_status = true,
	.uart_max_port = 8,
	.max_dma_burst_bytes = 8,
	.error_tolerance_low_range = -2,
	.error_tolerance_high_range = 2,
};
1540
1541static const struct of_device_id tegra_uart_of_match[] = {
1542 {
1543 .compatible = "nvidia,tegra30-hsuart",
1544 .data = &tegra30_uart_chip_data,
1545 }, {
1546 .compatible = "nvidia,tegra20-hsuart",
1547 .data = &tegra20_uart_chip_data,
1548 }, {
David Brazdil0f672f62019-12-10 10:32:29 +00001549 .compatible = "nvidia,tegra186-hsuart",
1550 .data = &tegra186_uart_chip_data,
1551 }, {
1552 .compatible = "nvidia,tegra194-hsuart",
1553 .data = &tegra194_uart_chip_data,
1554 }, {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001555 },
1556};
1557MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1558
1559static int tegra_uart_probe(struct platform_device *pdev)
1560{
1561 struct tegra_uart_port *tup;
1562 struct uart_port *u;
1563 struct resource *resource;
1564 int ret;
1565 const struct tegra_uart_chip_data *cdata;
1566 const struct of_device_id *match;
1567
1568 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1569 if (!match) {
1570 dev_err(&pdev->dev, "Error: No device match found\n");
1571 return -ENODEV;
1572 }
1573 cdata = match->data;
1574
1575 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1576 if (!tup) {
1577 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1578 return -ENOMEM;
1579 }
1580
1581 ret = tegra_uart_parse_dt(pdev, tup);
1582 if (ret < 0)
1583 return ret;
1584
1585 u = &tup->uport;
1586 u->dev = &pdev->dev;
1587 u->ops = &tegra_uart_ops;
1588 u->type = PORT_TEGRA;
1589 u->fifosize = 32;
1590 tup->cdata = cdata;
1591
1592 platform_set_drvdata(pdev, tup);
1593 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1594 if (!resource) {
1595 dev_err(&pdev->dev, "No IO memory resource\n");
1596 return -ENODEV;
1597 }
1598
1599 u->mapbase = resource->start;
1600 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1601 if (IS_ERR(u->membase))
1602 return PTR_ERR(u->membase);
1603
1604 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1605 if (IS_ERR(tup->uart_clk)) {
1606 dev_err(&pdev->dev, "Couldn't get the clock\n");
1607 return PTR_ERR(tup->uart_clk);
1608 }
1609
1610 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1611 if (IS_ERR(tup->rst)) {
1612 dev_err(&pdev->dev, "Couldn't get the reset\n");
1613 return PTR_ERR(tup->rst);
1614 }
1615
1616 u->iotype = UPIO_MEM32;
1617 ret = platform_get_irq(pdev, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00001618 if (ret < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001619 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001620 u->irq = ret;
1621 u->regshift = 2;
1622 ret = uart_add_one_port(&tegra_uart_driver, u);
1623 if (ret < 0) {
1624 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1625 return ret;
1626 }
1627 return ret;
1628}
1629
1630static int tegra_uart_remove(struct platform_device *pdev)
1631{
1632 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1633 struct uart_port *u = &tup->uport;
1634
1635 uart_remove_one_port(&tegra_uart_driver, u);
1636 return 0;
1637}
1638
1639#ifdef CONFIG_PM_SLEEP
1640static int tegra_uart_suspend(struct device *dev)
1641{
1642 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1643 struct uart_port *u = &tup->uport;
1644
1645 return uart_suspend_port(&tegra_uart_driver, u);
1646}
1647
1648static int tegra_uart_resume(struct device *dev)
1649{
1650 struct tegra_uart_port *tup = dev_get_drvdata(dev);
1651 struct uart_port *u = &tup->uport;
1652
1653 return uart_resume_port(&tegra_uart_driver, u);
1654}
1655#endif
1656
/* Suspend/resume hooks; compiled out when CONFIG_PM_SLEEP is disabled */
static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};
1660
/* Platform driver glue; matched against the OF table above */
static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name	= "serial-tegra",
		.of_match_table = tegra_uart_of_match,
		.pm	= &tegra_uart_pm_ops,
	},
};
1670
1671static int __init tegra_uart_init(void)
1672{
1673 int ret;
David Brazdil0f672f62019-12-10 10:32:29 +00001674 struct device_node *node;
1675 const struct of_device_id *match = NULL;
1676 const struct tegra_uart_chip_data *cdata = NULL;
1677
1678 node = of_find_matching_node(NULL, tegra_uart_of_match);
1679 if (node)
1680 match = of_match_node(tegra_uart_of_match, node);
1681 if (match)
1682 cdata = match->data;
1683 if (cdata)
1684 tegra_uart_driver.nr = cdata->uart_max_port;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001685
1686 ret = uart_register_driver(&tegra_uart_driver);
1687 if (ret < 0) {
1688 pr_err("Could not register %s driver\n",
David Brazdil0f672f62019-12-10 10:32:29 +00001689 tegra_uart_driver.driver_name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001690 return ret;
1691 }
1692
1693 ret = platform_driver_register(&tegra_uart_platform_driver);
1694 if (ret < 0) {
1695 pr_err("Uart platform driver register failed, e = %d\n", ret);
1696 uart_unregister_driver(&tegra_uart_driver);
1697 return ret;
1698 }
1699 return 0;
1700}
1701
/* Module exit: unregister in reverse order of registration. */
static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}
1708
1709module_init(tegra_uart_init);
1710module_exit(tegra_uart_exit);
1711
1712MODULE_ALIAS("platform:serial-tegra");
1713MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
1714MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1715MODULE_LICENSE("GPL v2");