blob: 64f18bf1e694a8cc6c493a6c7b516c079ad4f551 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * serial_tegra.c
4 *
5 * High-speed serial driver for NVIDIA Tegra SoCs
6 *
David Brazdil0f672f62019-12-10 10:32:29 +00007 * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008 *
9 * Author: Laxman Dewangan <ldewangan@nvidia.com>
10 */
11
12#include <linux/clk.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
17#include <linux/dmapool.h>
18#include <linux/err.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/pagemap.h>
25#include <linux/platform_device.h>
26#include <linux/reset.h>
27#include <linux/serial.h>
28#include <linux/serial_8250.h>
29#include <linux/serial_core.h>
30#include <linux/serial_reg.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/termios.h>
34#include <linux/tty.h>
35#include <linux/tty_flip.h>
36
37#define TEGRA_UART_TYPE "TEGRA_UART"
38#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
39#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
40
41#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
42#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
43#define TEGRA_UART_IER_EORD 0x20
44#define TEGRA_UART_MCR_RTS_EN 0x40
45#define TEGRA_UART_MCR_CTS_EN 0x20
46#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
47 UART_LSR_PE | UART_LSR_FE)
48#define TEGRA_UART_IRDA_CSR 0x08
49#define TEGRA_UART_SIR_ENABLED 0x80
50
51#define TEGRA_UART_TX_PIO 1
52#define TEGRA_UART_TX_DMA 2
53#define TEGRA_UART_MIN_DMA 16
54#define TEGRA_UART_FIFO_SIZE 32
55
/*
 * The Tx FIFO trigger level in the Tegra UART is encoded in the
 * reverse order compared to a conventional UART.
 */
60#define TEGRA_UART_TX_TRIG_16B 0x00
61#define TEGRA_UART_TX_TRIG_8B 0x10
62#define TEGRA_UART_TX_TRIG_4B 0x20
63#define TEGRA_UART_TX_TRIG_1B 0x30
64
David Brazdil0f672f62019-12-10 10:32:29 +000065#define TEGRA_UART_MAXIMUM 8
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000066
67/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
68#define TEGRA_UART_DEFAULT_BAUD 115200
69#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
70
71/* Tx transfer mode */
72#define TEGRA_TX_PIO 1
73#define TEGRA_TX_DMA 2
74
David Brazdil0f672f62019-12-10 10:32:29 +000075#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
76
/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source support the clock divider.
 * @fifo_mode_enable_status: FIFO-mode enable can be read back from the IIR
 *			register, so it is polled after enabling the FIFO.
 * @uart_max_port: Number of UART ports supported by this SoC.
 * @max_dma_burst_bytes: DMA burst size; selects the Rx FIFO trigger level.
 * @error_tolerance_low_range: Lowest tolerated baud-rate error, in percent.
 * @error_tolerance_high_range: Highest tolerated baud-rate error, in percent.
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
	bool fifo_mode_enable_status;
	int uart_max_port;
	int max_dma_burst_bytes;
	int error_tolerance_low_range;
	int error_tolerance_high_range;
};
95
/*
 * One entry of the DT-provided tolerance table: for baud rates in
 * [lower_range_baud, upper_range_baud] the requested clock rate is biased
 * by @tolerance, expressed in 0.01% units (see tegra_get_tolerance_rate()).
 */
struct tegra_baud_tolerance {
	u32 lower_range_baud;
	u32 upper_range_baud;
	s32 tolerance;
};
101
/*
 * Per-port driver state, embedding the core uart_port.  The FCR/MCR/LCR/IER
 * values are shadowed in software and all read-modify-write updates go
 * through the shadows rather than reading the hardware back.
 */
struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;	/* SoC-specific quirks */

	struct clk *uart_clk;		/* clock feeding the baud generator */
	struct reset_control *rst;
	unsigned int current_baud;	/* 0 until a baud rate is programmed */

	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;		/* RTS flow control currently asserted */

	int tx_in_progress;		/* 0, TEGRA_UART_TX_PIO or TEGRA_UART_TX_DMA */
	unsigned int tx_bytes;		/* bytes queued for the current Tx chunk */

	bool enable_modem_interrupt;

	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;			/* bit times per symbol, for timing waits */

	/* DMA channels, bounce buffers and in-flight descriptor state */
	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	unsigned int tx_bytes_requested;
	unsigned int rx_bytes_requested;
	/* Optional DT baud-rate tolerance table (see tegra_baud_tolerance) */
	struct tegra_baud_tolerance *baud_tolerance;
	int n_adjustable_baud_rates;
	int required_rate;		/* clock rate the requested baud needs */
	int configured_rate;		/* clock rate actually achieved */
	bool use_rx_pio;		/* force PIO instead of DMA for Rx */
	bool use_tx_pio;		/* force PIO instead of DMA for Tx */
};
145
146static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
147static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
David Brazdil0f672f62019-12-10 10:32:29 +0000148static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
149 bool dma_to_memory);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000150
151static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
152 unsigned long reg)
153{
154 return readl(tup->uport.membase + (reg << tup->uport.regshift));
155}
156
157static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
158 unsigned long reg)
159{
160 writel(val, tup->uport.membase + (reg << tup->uport.regshift));
161}
162
/* Recover the driver-private port state from the embedded uart_port. */
static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}
167
/* Report the state of the modem control input lines to the serial core. */
static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active
	 * CD/DCD/CAR - Carrier detect is always active. For some reason
	 * linux has different names for carrier detect.
	 * DSR - Data Set ready is active as the hardware doesn't support it.
	 * Don't know if the linux support this yet?
	 * CTS - Clear to send. Always set to active, as the hardware handles
	 * CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}
185
186static void set_rts(struct tegra_uart_port *tup, bool active)
187{
188 unsigned long mcr;
189
190 mcr = tup->mcr_shadow;
191 if (active)
192 mcr |= TEGRA_UART_MCR_RTS_EN;
193 else
194 mcr &= ~TEGRA_UART_MCR_RTS_EN;
195 if (mcr != tup->mcr_shadow) {
196 tegra_uart_write(tup, mcr, UART_MCR);
197 tup->mcr_shadow = mcr;
198 }
199}
200
201static void set_dtr(struct tegra_uart_port *tup, bool active)
202{
203 unsigned long mcr;
204
205 mcr = tup->mcr_shadow;
206 if (active)
207 mcr |= UART_MCR_DTR;
208 else
209 mcr &= ~UART_MCR_DTR;
210 if (mcr != tup->mcr_shadow) {
211 tegra_uart_write(tup, mcr, UART_MCR);
212 tup->mcr_shadow = mcr;
213 }
214}
215
David Brazdil0f672f62019-12-10 10:32:29 +0000216static void set_loopbk(struct tegra_uart_port *tup, bool active)
217{
218 unsigned long mcr = tup->mcr_shadow;
219
220 if (active)
221 mcr |= UART_MCR_LOOP;
222 else
223 mcr &= ~UART_MCR_LOOP;
224
225 if (mcr != tup->mcr_shadow) {
226 tegra_uart_write(tup, mcr, UART_MCR);
227 tup->mcr_shadow = mcr;
228 }
229}
230
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000231static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
232{
233 struct tegra_uart_port *tup = to_tegra_uport(u);
David Brazdil0f672f62019-12-10 10:32:29 +0000234 int enable;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000235
236 tup->rts_active = !!(mctrl & TIOCM_RTS);
237 set_rts(tup, tup->rts_active);
238
David Brazdil0f672f62019-12-10 10:32:29 +0000239 enable = !!(mctrl & TIOCM_DTR);
240 set_dtr(tup, enable);
241
242 enable = !!(mctrl & TIOCM_LOOP);
243 set_loopbk(tup, enable);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000244}
245
246static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
247{
248 struct tegra_uart_port *tup = to_tegra_uport(u);
249 unsigned long lcr;
250
251 lcr = tup->lcr_shadow;
252 if (break_ctl)
253 lcr |= UART_LCR_SBC;
254 else
255 lcr &= ~UART_LCR_SBC;
256 tegra_uart_write(tup, lcr, UART_LCR);
257 tup->lcr_shadow = lcr;
258}
259
/**
 * tegra_uart_wait_cycle_time: Wait for N UART clock periods
 *
 * @tup: Tegra serial port data structure.
 * @cycles: Number of clock periods to wait.
 *
 * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
 * clock speed is 16X the current baud rate.
 */
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
				       unsigned int cycles)
{
	/* Nothing to wait for until a baud rate has been programmed. */
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}
275
/* Wait for @syms symbol-times; symb_bit is the bit count per symbol. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
			unsigned int syms)
{
	/* Nothing to wait for until a baud rate has been programmed. */
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}
284
David Brazdil0f672f62019-12-10 10:32:29 +0000285static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
286{
287 unsigned long iir;
288 unsigned int tmout = 100;
289
290 do {
291 iir = tegra_uart_read(tup, UART_IIR);
292 if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
293 return 0;
294 udelay(1);
295 } while (--tmout);
296
297 return -ETIMEDOUT;
298}
299
/*
 * Reset the Rx and/or Tx FIFOs selected by @fcr_bits, with RTS dropped
 * around the operation so the remote end stops sending.  On SoCs that do
 * not allow resetting the FIFOs while FIFO mode is on, the FIFO is
 * disabled, flushed and re-enabled instead.
 */
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	/* Deassert RTS so no new data arrives during the reset. */
	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		/* Disable FIFO mode, flush, then re-enable it. */
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	/* Wait (bounded) until the transmitter is idle and no data pends. */
	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	/* Restore flow control. */
	if (tup->rts_active)
		set_rts(tup, true);
}
343
344static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
345 unsigned int baud, long rate)
346{
347 int i;
348
349 for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
350 if (baud >= tup->baud_tolerance[i].lower_range_baud &&
351 baud <= tup->baud_tolerance[i].upper_range_baud)
352 return (rate + (rate *
353 tup->baud_tolerance[i].tolerance) / 10000);
354 }
355
356 return rate;
357}
358
359static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
360{
361 long diff;
362
363 diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
364 / tup->required_rate;
365 if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
366 diff > (tup->cdata->error_tolerance_high_range * 100)) {
367 dev_err(tup->uport.dev,
368 "configured baud rate is out of range by %ld", diff);
369 return -EIO;
370 }
371
372 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000373}
374
/*
 * Program the port for @baud.  On SoCs with a per-port clock divider the
 * source clock itself is set to 16 * baud (optionally biased by the DT
 * tolerance table) and the internal divisor stays 1; otherwise the divisor
 * is derived from the fixed clock rate.  The DLAB divisor-latch access is
 * done under the port lock.  Returns 0 on success or a negative errno.
 */
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	/* Already running at the requested rate - nothing to do. */
	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		/* Reject the rate if the clock could not get close enough. */
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Open the divisor latches (DLAB), write DLL/DLM, then close them. */
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
430
/*
 * Decode LSR error bits, update the error counters, and insert the error
 * character into the tty buffer.  Returns the tty flag for the condition
 * (TTY_NORMAL when no error bit is set).
 */
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
			unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			/* Frame error */
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			/*
			 * Break error
			 * If FIFO read error without any data, reset Rx FIFO
			 */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
			/* Ignored breaks are reported but not counted/queued. */
			if (tup->uport.ignore_status_mask & UART_LSR_BI)
				return TTY_BREAK;
			flag = TTY_BREAK;
			tup->uport.icount.brk++;
			dev_dbg(tup->uport.dev, "Got Break\n");
		}
		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
	}

	return flag;
}
469
/* No extra resources to claim; registers are mapped elsewhere. */
static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}
474
static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}
479
/*
 * Push up to @max_bytes from the xmit circular buffer into the Tx FIFO.
 * On SoCs that expose a FIFO-full flag the loop stops early when the FIFO
 * fills; the caller guarantees at least @max_bytes are pending.
 */
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}
497
/*
 * Arm a PIO transmit by enabling the THRE interrupt; the FIFO is actually
 * filled from the Tx interrupt path (tegra_uart_handle_tx_pio()).
 */
static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	/* Cap a PIO chunk at the DMA threshold size. */
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}
509
/*
 * Tx DMA completion callback.  Advances the circular buffer tail by the
 * bytes actually transferred (requested minus residue), wakes writers when
 * the buffer drains below WAKEUP_CHARS, and chains the next transmission.
 */
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
529
/*
 * Start a Tx DMA transfer from the xmit buffer tail.  The length is
 * rounded down to a 16-byte multiple to match the DMA burst size; the
 * caller only takes this path when the tail is word-aligned.
 * Returns 0 on success, -EIO if no descriptor could be obtained.
 */
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	/* Hand the CPU-written buffer contents over to the device. */
	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
557
/*
 * Kick the next chunk of transmission: PIO for small or unaligned data,
 * DMA otherwise.  Unaligned tails are drained by PIO until the buffer
 * pointer reaches word alignment, then DMA takes over.
 */
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	/* The port is not configured yet - nothing can be sent. */
	if (!tup->current_baud)
		return;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}
579
580/* Called by serial core driver with u->lock taken. */
581static void tegra_uart_start_tx(struct uart_port *u)
582{
583 struct tegra_uart_port *tup = to_tegra_uport(u);
584 struct circ_buf *xmit = &u->state->xmit;
585
586 if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
587 tegra_uart_start_next_tx(tup);
588}
589
590static unsigned int tegra_uart_tx_empty(struct uart_port *u)
591{
592 struct tegra_uart_port *tup = to_tegra_uport(u);
593 unsigned int ret = 0;
594 unsigned long flags;
595
596 spin_lock_irqsave(&u->lock, flags);
597 if (!tup->tx_in_progress) {
598 unsigned long lsr = tegra_uart_read(tup, UART_LSR);
599 if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
600 ret = TIOCSER_TEMT;
601 }
602 spin_unlock_irqrestore(&u->lock, flags);
603 return ret;
604}
605
/*
 * Abort an in-flight Tx DMA transfer, crediting the bytes that were
 * actually sent back to the circular buffer.  PIO transmission needs no
 * action; it stops once the current FIFO fill completes.
 */
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
623
/*
 * Tx interrupt path for PIO mode: fill the FIFO with the chunk armed by
 * tegra_uart_start_pio_tx(), wake writers if needed, and chain the next
 * transmission.
 */
static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
}
634
/*
 * Drain the Rx FIFO by PIO into @tty.  Error characters are already
 * inserted by tegra_uart_decode_rx_error(), so bytes with a non-normal
 * flag are skipped here.
 */
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
		struct tty_port *tty)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		if (flag != TTY_NORMAL)
			continue;

		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		/* Consumed by the magic-sysrq handler - do not queue it. */
		if (uart_handle_sysrq_char(&tup->uport, ch))
			continue;

		if (tup->uport.ignore_status_mask & UART_LSR_DR)
			continue;

		if (tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);
}
664
/*
 * Copy @count bytes received by DMA from the bounce buffer into the tty
 * flip buffer.  The buffer is synced to the CPU before the copy and handed
 * back to the device afterwards, ready for the next Rx DMA transfer.
 */
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *tty,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}

	if (tup->uport.ignore_status_mask & UART_LSR_DR)
		return;

	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		/* tty buffer exhaustion drops data - loud warning. */
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
695
/*
 * Push all pending received data up to the tty layer: first the bytes the
 * stopped DMA transfer collected (requested minus @residue), then whatever
 * is still sitting in the Rx FIFO.
 */
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	struct tty_struct *tty = tty_port_tty_get(port);
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}
715
/*
 * Rx DMA completion callback: push the full buffer to the tty layer and
 * re-queue the next Rx DMA transfer, with RTS dropped across the window
 * so the sender pauses.  A still-in-progress status means the callback
 * raced with other handling and there is nothing to do.
 */
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* The transfer completed fully, so the residue is zero. */
	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}
747
/*
 * Stop the current Rx DMA transfer, push the partially filled buffer to
 * the tty layer, and restart DMA.  RTS is dropped across the window so the
 * sender pauses.  Used from the ISR on end-of-data/timeout conditions.
 */
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
764
/*
 * Queue a full-buffer Rx DMA transfer into the bounce buffer.
 * Returns 0 on success, -EIO if no descriptor could be obtained.
 */
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	/* Hand the buffer to the device for the duration of the transfer. */
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
786
/*
 * Modem-status interrupt handler: propagate RI/DSR/DCD/CTS delta events
 * to the serial core counters and helpers.
 */
static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;
	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
807
David Brazdil0f672f62019-12-10 10:32:29 +0000808static void do_handle_rx_pio(struct tegra_uart_port *tup)
809{
810 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
811 struct tty_port *port = &tup->uport.state->port;
812
813 tegra_uart_handle_rx_pio(tup, port);
814 if (tty) {
815 tty_flip_buffer_push(port);
816 tty_kref_put(tty);
817 }
818}
819
/*
 * Main interrupt handler.  Loops reading IIR until no interrupt is
 * pending.  In DMA mode an Rx interrupt first masks the Rx interrupt
 * sources; the actual DMA drain then happens once IIR goes idle, after
 * which the Rx interrupts are re-enabled.
 */
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			/* Deferred Rx handling once the controller is idle. */
			if (!tup->use_rx_pio && is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					/* Re-enable the Rx interrupt sources. */
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!tup->use_rx_pio && !is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			} else {
				do_handle_rx_pio(tup);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
887
/*
 * Stop reception: drop RTS, mask all Rx interrupt sources, and flush any
 * data already received (stopping the DMA transfer first in DMA mode) up
 * to the tty layer.
 */
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_port *port = &tup->uport.state->port;
	struct dma_tx_state state;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;
	if (tup->rx_dma_chan && !tup->use_rx_pio) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		tegra_uart_rx_buffer_push(tup, state.residue);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
}
917
/*
 * Shut the port down: mask interrupts, wait (bounded by the time the FIFO
 * should take to drain at the current baud) for the transmitter to go
 * idle, reset both FIFOs, release the DMA channels, and gate the clock.
 */
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	/* Time for one character: 10 bit-times at the current baud, in us. */
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				/* Budget exhausted - give up and report. */
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
					(msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	if (!tup->use_rx_pio)
		tegra_uart_dma_channel_free(tup, true);
	if (!tup->use_tx_pio)
		tegra_uart_dma_channel_free(tup, false);

	clk_disable_unprepare(tup->uart_clk);
}
973
974static int tegra_uart_hw_init(struct tegra_uart_port *tup)
975{
976 int ret;
977
978 tup->fcr_shadow = 0;
979 tup->mcr_shadow = 0;
980 tup->lcr_shadow = 0;
981 tup->ier_shadow = 0;
982 tup->current_baud = 0;
983
984 clk_prepare_enable(tup->uart_clk);
985
986 /* Reset the UART controller to clear all previous status.*/
987 reset_control_assert(tup->rst);
988 udelay(10);
989 reset_control_deassert(tup->rst);
990
991 tup->rx_in_progress = 0;
992 tup->tx_in_progress = 0;
993
994 /*
995 * Set the trigger level
996 *
997 * For PIO mode:
998 *
999 * For receive, this will interrupt the CPU after that many number of
1000 * bytes are received, for the remaining bytes the receive timeout
1001 * interrupt is received. Rx high watermark is set to 4.
1002 *
1003 * For transmit, if the trasnmit interrupt is enabled, this will
1004 * interrupt the CPU when the number of entries in the FIFO reaches the
1005 * low watermark. Tx low watermark is set to 16 bytes.
1006 *
1007 * For DMA mode:
1008 *
1009 * Set the Tx trigger to 16. This should match the DMA burst size that
1010 * programmed in the DMA registers.
1011 */
1012 tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
David Brazdil0f672f62019-12-10 10:32:29 +00001013
1014 if (tup->use_rx_pio) {
1015 tup->fcr_shadow |= UART_FCR_R_TRIG_11;
1016 } else {
1017 if (tup->cdata->max_dma_burst_bytes == 8)
1018 tup->fcr_shadow |= UART_FCR_R_TRIG_10;
1019 else
1020 tup->fcr_shadow |= UART_FCR_R_TRIG_01;
1021 }
1022
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001023 tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
1024 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1025
1026 /* Dummy read to ensure the write is posted */
1027 tegra_uart_read(tup, UART_SCR);
1028
David Brazdil0f672f62019-12-10 10:32:29 +00001029 if (tup->cdata->fifo_mode_enable_status) {
1030 ret = tegra_uart_wait_fifo_mode_enabled(tup);
Olivier Deprez0e641232021-09-23 10:07:05 +02001031 if (ret < 0) {
1032 dev_err(tup->uport.dev,
1033 "Failed to enable FIFO mode: %d\n", ret);
David Brazdil0f672f62019-12-10 10:32:29 +00001034 return ret;
Olivier Deprez0e641232021-09-23 10:07:05 +02001035 }
David Brazdil0f672f62019-12-10 10:32:29 +00001036 } else {
1037 /*
1038 * For all tegra devices (up to t210), there is a hardware
1039 * issue that requires software to wait for 3 UART clock
1040 * periods after enabling the TX fifo, otherwise data could
1041 * be lost.
1042 */
1043 tegra_uart_wait_cycle_time(tup, 3);
1044 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001045
1046 /*
1047 * Initialize the UART with default configuration
1048 * (115200, N, 8, 1) so that the receive DMA buffer may be
1049 * enqueued
1050 */
David Brazdil0f672f62019-12-10 10:32:29 +00001051 ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001052 if (ret < 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00001053 dev_err(tup->uport.dev, "Failed to set baud rate\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001054 return ret;
1055 }
David Brazdil0f672f62019-12-10 10:32:29 +00001056 if (!tup->use_rx_pio) {
1057 tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
1058 tup->fcr_shadow |= UART_FCR_DMA_SELECT;
1059 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1060
1061 ret = tegra_uart_start_rx_dma(tup);
1062 if (ret < 0) {
1063 dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
1064 return ret;
1065 }
1066 } else {
1067 tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
1068 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001069 tup->rx_in_progress = 1;
1070
1071 /*
1072 * Enable IE_RXS for the receive status interrupts like line errros.
1073 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
1074 *
1075 * If using DMA mode, enable EORD instead of receive interrupt which
1076 * will interrupt after the UART is done with the receive instead of
1077 * the interrupt when the FIFO "threshold" is reached.
1078 *
1079 * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
1080 * the DATA is sitting in the FIFO and couldn't be transferred to the
David Brazdil0f672f62019-12-10 10:32:29 +00001081 * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001082 * triggered when there is a pause of the incomming data stream for 4
1083 * characters long.
1084 *
1085 * For pauses in the data which is not aligned to 4 bytes, we get
1086 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
1087 * then the EORD.
1088 */
David Brazdil0f672f62019-12-10 10:32:29 +00001089 if (!tup->use_rx_pio)
1090 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
1091 TEGRA_UART_IER_EORD;
1092 else
1093 tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
1094
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001095 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1096 return 0;
1097}
1098
1099static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
1100 bool dma_to_memory)
1101{
1102 if (dma_to_memory) {
1103 dmaengine_terminate_all(tup->rx_dma_chan);
1104 dma_release_channel(tup->rx_dma_chan);
1105 dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
1106 tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
1107 tup->rx_dma_chan = NULL;
1108 tup->rx_dma_buf_phys = 0;
1109 tup->rx_dma_buf_virt = NULL;
1110 } else {
1111 dmaengine_terminate_all(tup->tx_dma_chan);
1112 dma_release_channel(tup->tx_dma_chan);
1113 dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
1114 UART_XMIT_SIZE, DMA_TO_DEVICE);
1115 tup->tx_dma_chan = NULL;
1116 tup->tx_dma_buf_phys = 0;
1117 tup->tx_dma_buf_virt = NULL;
1118 }
1119}
1120
1121static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
1122 bool dma_to_memory)
1123{
1124 struct dma_chan *dma_chan;
1125 unsigned char *dma_buf;
1126 dma_addr_t dma_phys;
1127 int ret;
1128 struct dma_slave_config dma_sconfig;
1129
1130 dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
1131 dma_to_memory ? "rx" : "tx");
1132 if (IS_ERR(dma_chan)) {
1133 ret = PTR_ERR(dma_chan);
1134 dev_err(tup->uport.dev,
1135 "DMA channel alloc failed: %d\n", ret);
1136 return ret;
1137 }
1138
1139 if (dma_to_memory) {
1140 dma_buf = dma_alloc_coherent(tup->uport.dev,
1141 TEGRA_UART_RX_DMA_BUFFER_SIZE,
1142 &dma_phys, GFP_KERNEL);
1143 if (!dma_buf) {
1144 dev_err(tup->uport.dev,
1145 "Not able to allocate the dma buffer\n");
1146 dma_release_channel(dma_chan);
1147 return -ENOMEM;
1148 }
1149 dma_sconfig.src_addr = tup->uport.mapbase;
1150 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
David Brazdil0f672f62019-12-10 10:32:29 +00001151 dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001152 tup->rx_dma_chan = dma_chan;
1153 tup->rx_dma_buf_virt = dma_buf;
1154 tup->rx_dma_buf_phys = dma_phys;
1155 } else {
1156 dma_phys = dma_map_single(tup->uport.dev,
1157 tup->uport.state->xmit.buf, UART_XMIT_SIZE,
1158 DMA_TO_DEVICE);
1159 if (dma_mapping_error(tup->uport.dev, dma_phys)) {
1160 dev_err(tup->uport.dev, "dma_map_single tx failed\n");
1161 dma_release_channel(dma_chan);
1162 return -ENOMEM;
1163 }
1164 dma_buf = tup->uport.state->xmit.buf;
1165 dma_sconfig.dst_addr = tup->uport.mapbase;
1166 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1167 dma_sconfig.dst_maxburst = 16;
1168 tup->tx_dma_chan = dma_chan;
1169 tup->tx_dma_buf_virt = dma_buf;
1170 tup->tx_dma_buf_phys = dma_phys;
1171 }
1172
1173 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
1174 if (ret < 0) {
1175 dev_err(tup->uport.dev,
1176 "Dma slave config failed, err = %d\n", ret);
1177 tegra_uart_dma_channel_free(tup, dma_to_memory);
1178 return ret;
1179 }
1180
1181 return 0;
1182}
1183
1184static int tegra_uart_startup(struct uart_port *u)
1185{
1186 struct tegra_uart_port *tup = to_tegra_uport(u);
1187 int ret;
1188
David Brazdil0f672f62019-12-10 10:32:29 +00001189 if (!tup->use_tx_pio) {
1190 ret = tegra_uart_dma_channel_allocate(tup, false);
1191 if (ret < 0) {
1192 dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
1193 ret);
1194 return ret;
1195 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001196 }
1197
David Brazdil0f672f62019-12-10 10:32:29 +00001198 if (!tup->use_rx_pio) {
1199 ret = tegra_uart_dma_channel_allocate(tup, true);
1200 if (ret < 0) {
1201 dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
1202 ret);
1203 goto fail_rx_dma;
1204 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001205 }
1206
1207 ret = tegra_uart_hw_init(tup);
1208 if (ret < 0) {
1209 dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
1210 goto fail_hw_init;
1211 }
1212
1213 ret = request_irq(u->irq, tegra_uart_isr, 0,
1214 dev_name(u->dev), tup);
1215 if (ret < 0) {
1216 dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
1217 goto fail_hw_init;
1218 }
1219 return 0;
1220
1221fail_hw_init:
David Brazdil0f672f62019-12-10 10:32:29 +00001222 if (!tup->use_rx_pio)
1223 tegra_uart_dma_channel_free(tup, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001224fail_rx_dma:
David Brazdil0f672f62019-12-10 10:32:29 +00001225 if (!tup->use_tx_pio)
1226 tegra_uart_dma_channel_free(tup, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001227 return ret;
1228}
1229
1230/*
1231 * Flush any TX data submitted for DMA and PIO. Called when the
1232 * TX circular buffer is reset.
1233 */
1234static void tegra_uart_flush_buffer(struct uart_port *u)
1235{
1236 struct tegra_uart_port *tup = to_tegra_uport(u);
1237
1238 tup->tx_bytes = 0;
1239 if (tup->tx_dma_chan)
1240 dmaengine_terminate_all(tup->tx_dma_chan);
1241}
1242
1243static void tegra_uart_shutdown(struct uart_port *u)
1244{
1245 struct tegra_uart_port *tup = to_tegra_uport(u);
1246
1247 tegra_uart_hw_deinit(tup);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001248 free_irq(u->irq, tup);
1249}
1250
1251static void tegra_uart_enable_ms(struct uart_port *u)
1252{
1253 struct tegra_uart_port *tup = to_tegra_uport(u);
1254
1255 if (tup->enable_modem_interrupt) {
1256 tup->ier_shadow |= UART_IER_MSI;
1257 tegra_uart_write(tup, tup->ier_shadow, UART_IER);
1258 }
1259}
1260
/*
 * tegra_uart_set_termios - serial-core .set_termios hook.
 *
 * Applies parity, word length, stop bits, baud rate and hardware flow
 * control from @termios. Interrupts are masked for the duration of the
 * reconfiguration and re-enabled from the shadow register at the end.
 * The port lock is dropped around tegra_set_baudrate() — presumably
 * because the clock operations it performs may sleep; confirm against
 * that helper before relying on this.
 */
static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	/* symb_bit accumulates bits per symbol: start + data + parity + stop */
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
	int ret;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	/* Word length */
	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	ret = tegra_set_baudrate(tup, baud);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
		return;
	}
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS)	{
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);

	tup->uport.ignore_status_mask = 0;
	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		tup->uport.ignore_status_mask |= UART_LSR_DR;
	if (termios->c_iflag & IGNBRK)
		tup->uport.ignore_status_mask |= UART_LSR_BI;

	spin_unlock_irqrestore(&u->lock, flags);
}
1387
/* Port-type string reported to the serial core (e.g. via /proc). */
static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}
1392
/* uart_ops vtable wiring this driver's callbacks into the serial core. */
static const struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};
1410
/*
 * Serial-core driver instance; ports appear as /dev/ttyTHS<n>.
 * .nr may be raised at init time from the matched chip data
 * (see tegra_uart_init).
 */
static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= NULL,
	.nr		= TEGRA_UART_MAXIMUM,
};
1418
1419static int tegra_uart_parse_dt(struct platform_device *pdev,
1420 struct tegra_uart_port *tup)
1421{
1422 struct device_node *np = pdev->dev.of_node;
1423 int port;
David Brazdil0f672f62019-12-10 10:32:29 +00001424 int ret;
1425 int index;
1426 u32 pval;
1427 int count;
1428 int n_entries;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001429
1430 port = of_alias_get_id(np, "serial");
1431 if (port < 0) {
1432 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
1433 return port;
1434 }
1435 tup->uport.line = port;
1436
1437 tup->enable_modem_interrupt = of_property_read_bool(np,
1438 "nvidia,enable-modem-interrupt");
David Brazdil0f672f62019-12-10 10:32:29 +00001439
1440 index = of_property_match_string(np, "dma-names", "rx");
1441 if (index < 0) {
1442 tup->use_rx_pio = true;
1443 dev_info(&pdev->dev, "RX in PIO mode\n");
1444 }
1445 index = of_property_match_string(np, "dma-names", "tx");
1446 if (index < 0) {
1447 tup->use_tx_pio = true;
1448 dev_info(&pdev->dev, "TX in PIO mode\n");
1449 }
1450
1451 n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
1452 if (n_entries > 0) {
1453 tup->n_adjustable_baud_rates = n_entries / 3;
1454 tup->baud_tolerance =
1455 devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
1456 sizeof(*tup->baud_tolerance), GFP_KERNEL);
1457 if (!tup->baud_tolerance)
1458 return -ENOMEM;
1459 for (count = 0, index = 0; count < n_entries; count += 3,
1460 index++) {
1461 ret =
1462 of_property_read_u32_index(np,
1463 "nvidia,adjust-baud-rates",
1464 count, &pval);
1465 if (!ret)
1466 tup->baud_tolerance[index].lower_range_baud =
1467 pval;
1468 ret =
1469 of_property_read_u32_index(np,
1470 "nvidia,adjust-baud-rates",
1471 count + 1, &pval);
1472 if (!ret)
1473 tup->baud_tolerance[index].upper_range_baud =
1474 pval;
1475 ret =
1476 of_property_read_u32_index(np,
1477 "nvidia,adjust-baud-rates",
1478 count + 2, &pval);
1479 if (!ret)
1480 tup->baud_tolerance[index].tolerance =
1481 (s32)pval;
1482 }
1483 } else {
1484 tup->n_adjustable_baud_rates = 0;
1485 }
1486
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001487 return 0;
1488}
1489
/* Tegra20: no clock-source divider support, 4-byte DMA bursts. */
static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

/* Tegra30: adds clock-source divider support; still 4-byte bursts. */
static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= false,
	.uart_max_port			= 5,
	.max_dma_burst_bytes		= 4,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

/* Tegra186: FIFO-mode status readable, 8 ports, 8-byte DMA bursts. */
static struct tegra_uart_chip_data tegra186_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= 0,
	.error_tolerance_high_range	= 4,
};

/* Tegra194: like Tegra186 but with a symmetric +/-2% baud tolerance. */
static struct tegra_uart_chip_data tegra194_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
	.fifo_mode_enable_status	= true,
	.uart_max_port			= 8,
	.max_dma_burst_bytes		= 8,
	.error_tolerance_low_range	= -2,
	.error_tolerance_high_range	= 2,
};
1533
/* Device-tree compatible strings, each mapped to its chip data. */
static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
		.compatible     = "nvidia,tegra186-hsuart",
		.data		= &tegra186_uart_chip_data,
	}, {
		.compatible     = "nvidia,tegra194-hsuart",
		.data		= &tegra194_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
1551
1552static int tegra_uart_probe(struct platform_device *pdev)
1553{
1554 struct tegra_uart_port *tup;
1555 struct uart_port *u;
1556 struct resource *resource;
1557 int ret;
1558 const struct tegra_uart_chip_data *cdata;
1559 const struct of_device_id *match;
1560
1561 match = of_match_device(tegra_uart_of_match, &pdev->dev);
1562 if (!match) {
1563 dev_err(&pdev->dev, "Error: No device match found\n");
1564 return -ENODEV;
1565 }
1566 cdata = match->data;
1567
1568 tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
1569 if (!tup) {
1570 dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
1571 return -ENOMEM;
1572 }
1573
1574 ret = tegra_uart_parse_dt(pdev, tup);
1575 if (ret < 0)
1576 return ret;
1577
1578 u = &tup->uport;
1579 u->dev = &pdev->dev;
1580 u->ops = &tegra_uart_ops;
1581 u->type = PORT_TEGRA;
1582 u->fifosize = 32;
1583 tup->cdata = cdata;
1584
1585 platform_set_drvdata(pdev, tup);
1586 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1587 if (!resource) {
1588 dev_err(&pdev->dev, "No IO memory resource\n");
1589 return -ENODEV;
1590 }
1591
1592 u->mapbase = resource->start;
1593 u->membase = devm_ioremap_resource(&pdev->dev, resource);
1594 if (IS_ERR(u->membase))
1595 return PTR_ERR(u->membase);
1596
1597 tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
1598 if (IS_ERR(tup->uart_clk)) {
1599 dev_err(&pdev->dev, "Couldn't get the clock\n");
1600 return PTR_ERR(tup->uart_clk);
1601 }
1602
1603 tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
1604 if (IS_ERR(tup->rst)) {
1605 dev_err(&pdev->dev, "Couldn't get the reset\n");
1606 return PTR_ERR(tup->rst);
1607 }
1608
1609 u->iotype = UPIO_MEM32;
1610 ret = platform_get_irq(pdev, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00001611 if (ret < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001612 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001613 u->irq = ret;
1614 u->regshift = 2;
1615 ret = uart_add_one_port(&tegra_uart_driver, u);
1616 if (ret < 0) {
1617 dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
1618 return ret;
1619 }
1620 return ret;
1621}
1622
1623static int tegra_uart_remove(struct platform_device *pdev)
1624{
1625 struct tegra_uart_port *tup = platform_get_drvdata(pdev);
1626 struct uart_port *u = &tup->uport;
1627
1628 uart_remove_one_port(&tegra_uart_driver, u);
1629 return 0;
1630}
1631
#ifdef CONFIG_PM_SLEEP
/* System sleep: delegate port suspension to the serial core. */
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);

	return uart_suspend_port(&tegra_uart_driver, &tup->uport);
}

/* System resume: delegate port restoration to the serial core. */
static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);

	return uart_resume_port(&tegra_uart_driver, &tup->uport);
}
#endif
1649
/* Sleep PM hooks; compiled out when CONFIG_PM_SLEEP is disabled. */
static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};
1653
/* Platform-driver glue binding probe/remove/PM to the OF match table. */
static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name	= "serial-tegra",
		.of_match_table = tegra_uart_of_match,
		.pm	= &tegra_uart_pm_ops,
	},
};
1663
1664static int __init tegra_uart_init(void)
1665{
1666 int ret;
David Brazdil0f672f62019-12-10 10:32:29 +00001667 struct device_node *node;
1668 const struct of_device_id *match = NULL;
1669 const struct tegra_uart_chip_data *cdata = NULL;
1670
1671 node = of_find_matching_node(NULL, tegra_uart_of_match);
1672 if (node)
1673 match = of_match_node(tegra_uart_of_match, node);
1674 if (match)
1675 cdata = match->data;
1676 if (cdata)
1677 tegra_uart_driver.nr = cdata->uart_max_port;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001678
1679 ret = uart_register_driver(&tegra_uart_driver);
1680 if (ret < 0) {
1681 pr_err("Could not register %s driver\n",
David Brazdil0f672f62019-12-10 10:32:29 +00001682 tegra_uart_driver.driver_name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001683 return ret;
1684 }
1685
1686 ret = platform_driver_register(&tegra_uart_platform_driver);
1687 if (ret < 0) {
1688 pr_err("Uart platform driver register failed, e = %d\n", ret);
1689 uart_unregister_driver(&tegra_uart_driver);
1690 return ret;
1691 }
1692 return 0;
1693}
1694
/*
 * tegra_uart_exit - module exit; unregister in reverse order of init
 * (platform driver first, then the serial-core driver).
 */
static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}
1701
/* Module entry/exit points and metadata. */
module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");