// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *           Gerald Baeza <gerald.baeza@st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "stm32-usart.h"

static void stm32_stop_tx(struct uart_port *port);
static void stm32_transmit_chars(struct uart_port *port);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

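/* Read-modify-write helpers to set or clear bits in a USART register. */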
static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}

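/*
 * Convert the RS485 driver-enable assertion/deassertion delays (passed by the
 * serial core in milliseconds) into DEAT/DEDT sample-time units for CR1,
 * based on the baud rate and the current oversampling mode, clamping to the
 * maximum field value.
 */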
static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
				   u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			   USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			   USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}

static int stm32_config_rs485(struct uart_port *port,
			      struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	port->rs485 = *rs485conf;

	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_config_reg_rs485(&cr1, &cr3,
				       rs485conf->delay_rts_before_send,
				       rs485conf->delay_rts_after_send, baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		stm32_clr_bits(port, ofs->cr3, USART_CR3_DEM | USART_CR3_DEP);
		stm32_clr_bits(port, ofs->cr1,
			       USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}

static int stm32_init_rs485(struct uart_port *port,
			    struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	uart_get_rs485_mode(&pdev->dev, rs485conf);

	return 0;
}

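/*
 * Return non-zero when received data is pending: in DMA mode this means the
 * cyclic transfer residue has moved since the last check, in PIO mode it
 * means the RXNE status flag is set.
 */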
static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
			    bool threaded)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status status;
	struct dma_tx_state state;

	*sr = readl_relaxed(port->membase + ofs->isr);

	if (threaded && stm32_port->rx_ch) {
		status = dmaengine_tx_status(stm32_port->rx_ch,
					     stm32_port->rx_ch->cookie,
					     &state);
		if ((status == DMA_IN_PROGRESS) &&
		    (*last_res != state.residue))
			return 1;
		else
			return 0;
	} else if (*sr & USART_SR_RXNE) {
		return 1;
	}
	return 0;
}

static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
				    int *last_res)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	if (stm32_port->rx_ch) {
		c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
		if ((*last_res) == 0)
			*last_res = RX_BUF_L;
	} else {
		c = readl_relaxed(port->membase + ofs->rdr);
		/* apply RDR data mask */
		c &= stm32_port->rdr_mask;
	}

	return c;
}

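/*
 * Drain pending receive data (from the DMA ring buffer or the RDR), account
 * for error flags, and push the characters to the tty layer.
 */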
static void stm32_receive_chars(struct uart_port *port, bool threaded)
{
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	u32 sr;
	char flag;

	if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
		pm_wakeup_event(tport->tty->dev, 0);

	while (stm32_pending_rx(port, &sr, &stm32_port->last_res, threaded)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * in FIFO mode, reading the RDR pops the next data (if any)
		 * along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears the status bits of the next rx data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_get_char(port, &sr, &stm32_port->last_res);
		port->icount.rx++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_handle_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);
}

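/*
 * DMA transmit completion callback: stop DMA requests from the USART and
 * kick the transmit path again in case more data was queued meanwhile.
 */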
static void stm32_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;

	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32port->tx_dma_busy = false;

	/* Let's see if we have pending data to send */
	stm32_transmit_chars(port);
}

static void stm32_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables TX FIFO threshold irq when FIFO is enabled,
	 * or TX empty irq when FIFO is disabled
	 */
	if (stm32_port->fifoen)
		stm32_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen)
		stm32_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_port->tx_dma_busy) {
		stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_tx_interrupt_disable(port);
	else
		stm32_tx_interrupt_enable(port);
}

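/*
 * Copy up to TX_BUF_L bytes from the circular transmit buffer into the
 * coherent DMA bounce buffer (in one or two chunks if it wraps) and start a
 * single memory-to-device transfer; fall back to PIO if the descriptor
 * cannot be prepared.
 */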
static void stm32_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc) {
		for (i = count; i > 0; i--)
			stm32_transmit_chars_pio(port);
		return;
	}

	desc->callback = stm32_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
}

static void stm32_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		if (stm32_port->tx_dma_busy)
			stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_port->tx_dma_busy)
			stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_transmit_chars_dma(port);
	else
		stm32_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		stm32_tx_interrupt_disable(port);
}

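/*
 * Hard interrupt handler: acknowledge receiver-timeout and wakeup flags,
 * service RX/TX in PIO mode, and defer RX DMA draining to the threaded
 * handler.
 */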
static irqreturn_t stm32_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;

	spin_lock(&port->lock);

	sr = readl_relaxed(port->membase + ofs->isr);

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && (ofs->icr != UNDEF_REG))
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
		stm32_receive_chars(port, false);

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
		stm32_transmit_chars(port);

	spin_unlock(&port->lock);

	if (stm32_port->rx_ch)
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}

static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct stm32_port *stm32_port = to_stm32_port(port);

	spin_lock(&port->lock);

	if (stm32_port->rx_ch)
		stm32_receive_chars(port, true);

	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}

static unsigned int stm32_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}

static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
}

static unsigned int stm32_get_mctrl(struct uart_port *port)
{
	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

/* Transmit stop */
static void stm32_stop_tx(struct uart_port *port)
{
	stm32_tx_interrupt_disable(port);
}

/* There are probably characters waiting to be transmitted. */
static void stm32_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit))
		return;

	stm32_transmit_chars(port);
}

/* Throttle the remote when input buffer is about to overflow. */
static void stm32_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Receive stop */
static void stm32_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

/* Handle breaks - ignored by us */
static void stm32_break_ctl(struct uart_port *port, int break_state)
{
}

static int stm32_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_interrupt,
				   stm32_threaded_interrupt,
				   IRQF_NO_SUSPEND, name, port);
	if (ret)
		return ret;

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		stm32_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);

	/* TX and RX FIFO configuration */
	if (stm32_port->fifoen) {
		val = readl_relaxed(port->membase + ofs->cr3);
		val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
		val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
		val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
		writel_relaxed(val, port->membase + ofs->cr3);
	}

	/* RX FIFO enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE;
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;
	stm32_set_bits(port, ofs->cr1, val);

	return 0;
}

static void stm32_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	if (ret)
		dev_err(port->dev, "transmission complete not set\n");

	stm32_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static unsigned int stm32_get_databits(struct ktermios *termios)
{
	unsigned int bits;

	tcflag_t cflag = termios->c_cflag;

	switch (cflag & CSIZE) {
	/*
	 * CSIZE settings are not necessarily supported in hardware.
	 * CSIZE unsupported configurations are handled here to set word length
	 * to 8 bits word as default configuration and to print debug message.
	 */
	case CS5:
		bits = 5;
		break;
	case CS6:
		bits = 6;
		break;
	case CS7:
		bits = 7;
		break;
	/* default including CS8 */
	default:
		bits = 8;
		break;
	}

	return bits;
}

static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
			      struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		stm32_set_bits(port, ofs->rqr,
			       USART_RQR_TXFRQ | USART_RQR_RXFRQ);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = 0;
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
		| USART_CR3_TXFTCFG_MASK;

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = stm32_get_databits(termios);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9)
		cr1 |= USART_CR1_M0;
	else if ((bits == 7) && cfg->has_7bits_data)
		cr1 |= USART_CR1_M1;
	else if (bits != 8)
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
			bits);

	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       stm32_port->fifoen)) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/* Not using dma, enable fifo threshold irq */
		if (!stm32_port->rx_ch)
			stm32_port->cr3_irq = USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch)
		cr3 |= USART_CR3_DMAR;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_config_reg_rs485(&cr1, &cr3,
				       rs485conf->delay_rts_before_send,
				       rs485conf->delay_rts_after_send, baud);
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}
	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *stm32_type(struct uart_port *port)
{
	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}

static void stm32_release_port(struct uart_port *port)
{
}

static int stm32_request_port(struct uart_port *port)
{
	return 0;
}

static void stm32_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_STM32;
}

static int
stm32_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}

static void stm32_pm(struct uart_port *port, unsigned int state,
		     unsigned int oldstate)
{
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct stm32_usart_config *cfg = &stm32port->info->cfg;
	unsigned long flags = 0;

	switch (state) {
	case UART_PM_STATE_ON:
		pm_runtime_get_sync(port->dev);
		break;
	case UART_PM_STATE_OFF:
		spin_lock_irqsave(&port->lock, flags);
		stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		spin_unlock_irqrestore(&port->lock, flags);
		pm_runtime_put_sync(port->dev);
		break;
	}
}

static const struct uart_ops stm32_uart_ops = {
	.tx_empty	= stm32_tx_empty,
	.set_mctrl	= stm32_set_mctrl,
	.get_mctrl	= stm32_get_mctrl,
	.stop_tx	= stm32_stop_tx,
	.start_tx	= stm32_start_tx,
	.throttle	= stm32_throttle,
	.unthrottle	= stm32_unthrottle,
	.stop_rx	= stm32_stop_rx,
	.break_ctl	= stm32_break_ctl,
	.startup	= stm32_startup,
	.shutdown	= stm32_shutdown,
	.set_termios	= stm32_set_termios,
	.pm		= stm32_pm,
	.type		= stm32_type,
	.release_port	= stm32_release_port,
	.request_port	= stm32_request_port,
	.config_port	= stm32_config_port,
	.verify_port	= stm32_verify_port,
};

static int stm32_init_port(struct stm32_port *stm32port,
			   struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &stm32_uart_ops;
	port->dev	= &pdev->dev;
	port->fifosize	= stm32port->info->cfg.fifosize;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0)
		return ret ? : -ENODEV;
	port->irq = ret;

	port->rs485_config = stm32_config_rs485;

	stm32_init_rs485(port, pdev);

	if (stm32port->info->cfg.has_wakeup) {
		stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
		if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
			return stm32port->wakeirq ? : -ENODEV;
	}

	stm32port->fifoen = stm32port->info->cfg.has_fifo;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	port->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		clk_disable_unprepare(stm32port->clk);
		ret = -EINVAL;
	}

	return ret;
}

static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control = of_property_read_bool(np,
							"st,hw-flow-ctrl");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;
	return &stm32_ports[id];
}

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif

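/*
 * Set up the RX DMA channel: allocate a coherent ring buffer and start a
 * cyclic device-to-memory transfer that is drained from the threaded
 * interrupt handler (no DMA completion callback is used).
 */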
static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie;
	int ret;

	/* Request DMA RX channel */
	stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
	if (!stm32port->rx_ch) {
		dev_info(dev, "rx dma alloc failed\n");
		return -ENODEV;
	}
	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
					 stm32port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "rx dma prep cyclic failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	/* No callback as dma buffer is drained on usart interrupt */
	desc->callback = NULL;
	desc->callback_param = NULL;

	/* Push current DMA transaction in the pending queue */
	cookie = dmaengine_submit(desc);

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32port->rx_ch);

	return 0;

config_err:
	dma_free_coherent(&pdev->dev,
			  RX_BUF_L, stm32port->rx_buf,
			  stm32port->rx_dma_buf);

alloc_err:
	dma_release_channel(stm32port->rx_ch);
	stm32port->rx_ch = NULL;

	return ret;
}

static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_dma_busy = false;

	/* Request DMA TX channel */
	stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
	if (!stm32port->tx_ch) {
		dev_info(dev, "tx dma alloc failed\n");
		return -ENODEV;
	}
	stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		ret = -ENODEV;
		goto config_err;
	}

	return 0;

config_err:
	dma_free_coherent(&pdev->dev,
			  TX_BUF_L, stm32port->tx_buf,
			  stm32port->tx_dma_buf);

alloc_err:
	dma_release_channel(stm32port->tx_ch);
	stm32port->tx_ch = NULL;

	return ret;
}

static int stm32_serial_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_of_get_stm32_port(pdev);
	if (!stm32port)
		return -ENODEV;

	match = of_match_device(stm32_match, &pdev->dev);
	if (match && match->data)
		stm32port->info = (struct stm32_usart_info *)match->data;
	else
		return -EINVAL;

	ret = stm32_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeirq > 0) {
		ret = device_init_wakeup(&pdev->dev, true);
		if (ret)
			goto err_uninit;

		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
						    stm32port->wakeirq);
		if (ret)
			goto err_nowup;

		device_set_wakeup_enable(&pdev->dev, false);
	}

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_wirq;

	ret = stm32_of_dma_rx_probe(stm32port, pdev);
	if (ret)
		dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");

	ret = stm32_of_dma_tx_probe(stm32port, pdev);
	if (ret)
		dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_wirq:
	if (stm32port->wakeirq > 0)
		dev_pm_clear_wake_irq(&pdev->dev);

err_nowup:
	if (stm32port->wakeirq > 0)
		device_init_wakeup(&pdev->dev, false);

err_uninit:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}

static int stm32_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	int err;

	pm_runtime_get_sync(&pdev->dev);

	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	if (stm32_port->rx_ch)
		dma_release_channel(stm32_port->rx_ch);

	if (stm32_port->rx_dma_buf)
		dma_free_coherent(&pdev->dev,
				  RX_BUF_L, stm32_port->rx_buf,
				  stm32_port->rx_dma_buf);

	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_port->tx_ch)
		dma_release_channel(stm32_port->tx_ch);

	if (stm32_port->tx_dma_buf)
		dma_free_coherent(&pdev->dev,
				  TX_BUF_L, stm32_port->tx_buf,
				  stm32_port->tx_dma_buf);

	if (stm32_port->wakeirq > 0) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	clk_disable_unprepare(stm32_port->clk);

	err = uart_remove_one_port(&stm32_usart_driver, port);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	return err;
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_console_putchar(struct uart_port *port, int ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + ofs->tdr);
}

static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	local_irq_save(flags);
	if (port->sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);
	else
		spin_lock(&port->lock);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock(&port->lock);
	local_irq_restore(flags);
}

static int stm32_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || stm32port->port.membase == NULL)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_console_write,
	.setup		= stm32_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static void __maybe_unused stm32_serial_enable_wakeup(struct uart_port *port,
						      bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val;

	if (stm32_port->wakeirq <= 0)
		return;

	if (enable) {
		stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		stm32_set_bits(port, ofs->cr1, USART_CR1_UESM);
		val = readl_relaxed(port->membase + ofs->cr3);
		val &= ~USART_CR3_WUS_MASK;
		/* Enable Wake up interrupt from low power on start bit */
		val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
		writel_relaxed(val, port->membase + ofs->cr3);
		stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	} else {
		stm32_clr_bits(port, ofs->cr1, USART_CR1_UESM);
	}
}

static int __maybe_unused stm32_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev))
		stm32_serial_enable_wakeup(port, true);
	else
		stm32_serial_enable_wakeup(port, false);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused stm32_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev))
		stm32_serial_enable_wakeup(port, false);

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_serial_runtime_suspend,
			   stm32_serial_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_serial_suspend, stm32_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_serial_probe,
	.remove		= stm32_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(usart_init);
module_exit(usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");