Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd

diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index effba6c..22e5d4e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
-
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
@@ -1102,9 +1092,8 @@
scif_set_rtrg(port, 1);
}
-static ssize_t rx_trigger_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rx_fifo_trigger_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
@@ -1112,10 +1101,9 @@
return sprintf(buf, "%d\n", sci->rx_trigger);
}
-static ssize_t rx_trigger_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rx_fifo_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
@@ -1133,7 +1121,7 @@
return count;
}
-static DEVICE_ATTR(rx_fifo_trigger, 0644, rx_trigger_show, rx_trigger_store);
+static DEVICE_ATTR_RW(rx_fifo_trigger);
static ssize_t rx_fifo_timeout_show(struct device *dev,
struct device_attribute *attr,
@@ -1243,12 +1231,22 @@
return -1;
}
-static void sci_rx_dma_release(struct sci_port *s)
+static void sci_dma_rx_chan_invalidate(struct sci_port *s)
+{
+ unsigned int i;
+
+ s->chan_rx = NULL;
+ for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
+ s->cookie_rx[i] = -EINVAL;
+ s->active_rx = 0;
+}
+
+static void sci_dma_rx_release(struct sci_port *s)
{
struct dma_chan *chan = s->chan_rx_saved;
- s->chan_rx_saved = s->chan_rx = NULL;
- s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+ s->chan_rx_saved = NULL;
+ sci_dma_rx_chan_invalidate(s);
dmaengine_terminate_sync(chan);
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
@@ -1264,6 +1262,20 @@
hrtimer_start(hrt, t, HRTIMER_MODE_REL);
}
+static void sci_dma_rx_reenable_irq(struct sci_port *s)
+{
+ struct uart_port *port = &s->port;
+ u16 scr;
+
+ /* Direct new serial port interrupts back to CPU */
+ scr = serial_port_in(port, SCSCR);
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ scr &= ~SCSCR_RDRQE;
+ enable_irq(s->irqs[SCIx_RXI_IRQ]);
+ }
+ serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+}
+
static void sci_dma_rx_complete(void *arg)
{
struct sci_port *s = arg;
@@ -1313,12 +1325,13 @@
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
/* Switch to PIO */
spin_lock_irqsave(&port->lock, flags);
- s->chan_rx = NULL;
- sci_start_rx(port);
+ dmaengine_terminate_async(chan);
+ sci_dma_rx_chan_invalidate(s);
+ sci_dma_rx_reenable_irq(s);
spin_unlock_irqrestore(&port->lock, flags);
}
-static void sci_tx_dma_release(struct sci_port *s)
+static void sci_dma_tx_release(struct sci_port *s)
{
struct dma_chan *chan = s->chan_tx_saved;
@@ -1331,7 +1344,7 @@
dma_release_channel(chan);
}
-static void sci_submit_rx(struct sci_port *s)
+static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held)
{
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
@@ -1359,22 +1372,22 @@
s->active_rx = s->cookie_rx[0];
dma_async_issue_pending(chan);
- return;
+ return 0;
fail:
+ /* Switch to PIO */
+ if (!port_lock_held)
+ spin_lock_irqsave(&port->lock, flags);
if (i)
dmaengine_terminate_async(chan);
- for (i = 0; i < 2; i++)
- s->cookie_rx[i] = -EINVAL;
- s->active_rx = -EINVAL;
- /* Switch to PIO */
- spin_lock_irqsave(&port->lock, flags);
- s->chan_rx = NULL;
+ sci_dma_rx_chan_invalidate(s);
sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ if (!port_lock_held)
+ spin_unlock_irqrestore(&port->lock, flags);
+ return -EAGAIN;
}
-static void work_fn_tx(struct work_struct *work)
+static void sci_dma_tx_work_fn(struct work_struct *work)
{
struct sci_port *s = container_of(work, struct sci_port, work_tx);
struct dma_async_tx_descriptor *desc;
@@ -1383,6 +1396,7 @@
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
dma_addr_t buf;
+ int head, tail;
/*
* DMA is idle now.
@@ -1392,16 +1406,23 @@
* consistent xmit buffer state.
*/
spin_lock_irq(&port->lock);
- buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+ head = xmit->head;
+ tail = xmit->tail;
+ buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
s->tx_dma_len = min_t(unsigned int,
- CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- spin_unlock_irq(&port->lock);
+ CIRC_CNT(head, tail, UART_XMIT_SIZE),
+ CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+ if (!s->tx_dma_len) {
+ /* Transmit buffer has been flushed */
+ spin_unlock_irq(&port->lock);
+ return;
+ }
desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
goto switch_to_pio;
}
@@ -1409,18 +1430,18 @@
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
DMA_TO_DEVICE);
- spin_lock_irq(&port->lock);
desc->callback = sci_dma_tx_complete;
desc->callback_param = s;
- spin_unlock_irq(&port->lock);
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
goto switch_to_pio;
}
+ spin_unlock_irq(&port->lock);
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
- __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+ __func__, xmit->buf, tail, head, s->cookie_tx);
dma_async_issue_pending(chan);
return;
@@ -1433,7 +1454,7 @@
return;
}
-static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
+static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
{
struct sci_port *s = container_of(t, struct sci_port, rx_timer);
struct dma_chan *chan = s->chan_rx;
@@ -1443,7 +1464,6 @@
unsigned long flags;
unsigned int read;
int active, count;
- u16 scr;
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1491,15 +1511,9 @@
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s);
+ sci_dma_rx_submit(s, true);
- /* Direct new serial port interrupts back to CPU */
- scr = serial_port_in(port, SCSCR);
- if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- scr &= ~SCSCR_RDRQE;
- enable_irq(s->irqs[SCIx_RXI_IRQ]);
- }
- serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+ sci_dma_rx_reenable_irq(s);
spin_unlock_irqrestore(&port->lock, flags);
@@ -1516,7 +1530,7 @@
chan = dma_request_slave_channel(port->dev,
dir == DMA_MEM_TO_DEV ? "tx" : "rx");
if (!chan) {
- dev_warn(port->dev, "dma_request_slave_channel failed\n");
+ dev_dbg(port->dev, "dma_request_slave_channel failed\n");
return NULL;
}
@@ -1549,6 +1563,13 @@
dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
+ /*
+ * DMA on console may interfere with Kernel log messages which use
+ * plain putchar(). So, simply don't use it with a console.
+ */
+ if (uart_console(port))
+ return;
+
if (!port->dev->of_node)
return;
@@ -1577,7 +1598,7 @@
__func__, UART_XMIT_SIZE,
port->state->xmit.buf, &s->tx_dma_addr);
- INIT_WORK(&s->work_tx, work_fn_tx);
+ INIT_WORK(&s->work_tx, sci_dma_tx_work_fn);
s->chan_tx_saved = s->chan_tx = chan;
}
}
@@ -1612,12 +1633,12 @@
}
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = rx_timer_fn;
+ s->rx_timer.function = sci_dma_rx_timer_fn;
s->chan_rx_saved = s->chan_rx = chan;
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s);
+ sci_dma_rx_submit(s, false);
}
}
@@ -1626,18 +1647,25 @@
struct sci_port *s = to_sci_port(port);
if (s->chan_tx_saved)
- sci_tx_dma_release(s);
+ sci_dma_tx_release(s);
if (s->chan_rx_saved)
- sci_rx_dma_release(s);
+ sci_dma_rx_release(s);
}
static void sci_flush_buffer(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
+
/*
* In uart_flush_buffer(), the xmit circular buffer has just been
- * cleared, so we have to reset tx_dma_len accordingly.
+ * cleared, so we have to reset tx_dma_len accordingly, and stop any
+ * pending transfers
*/
- to_sci_port(port)->tx_dma_len = 0;
+ s->tx_dma_len = 0;
+ if (s->chan_tx) {
+ dmaengine_terminate_async(s->chan_tx);
+ s->cookie_tx = -EINVAL;
+ }
}
#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
@@ -1666,8 +1694,10 @@
disable_irq_nosync(irq);
scr |= SCSCR_RDRQE;
} else {
+ if (sci_dma_rx_submit(s, false) < 0)
+ goto handle_pio;
+
scr &= ~SCSCR_RIE;
- sci_submit_rx(s);
}
serial_port_out(port, SCSCR, scr);
/* Clear current interrupt */
@@ -1679,6 +1709,8 @@
return IRQ_HANDLED;
}
+
+handle_pio:
#endif
if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
@@ -1693,7 +1725,7 @@
* of whether the I_IXOFF is set, otherwise, how is the interrupt
* to be disabled?
*/
- sci_receive_chars(ptr);
+ sci_receive_chars(port);
return IRQ_HANDLED;
}
@@ -1749,7 +1781,7 @@
} else {
sci_handle_fifo_overrun(port);
if (!s->chan_rx)
- sci_receive_chars(ptr);
+ sci_receive_chars(port);
}
sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
@@ -1914,7 +1946,7 @@
static void sci_free_irq(struct sci_port *port)
{
- int i;
+ int i, j;
/*
* Intentionally in reverse order so we iterate over the muxed
@@ -1930,6 +1962,13 @@
if (unlikely(irq < 0))
continue;
+ /* Check if already freed (irq was muxed) */
+ for (j = 0; j < i; j++)
+ if (port->irqs[j] == irq)
+ j = i + 1;
+ if (j > i)
+ continue;
+
free_irq(port->irqs[i], port);
kfree(port->irqstr[i]);
@@ -2060,12 +2099,12 @@
if (s->autorts) {
if (sci_get_cts(port))
mctrl |= TIOCM_CTS;
- } else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
+ } else if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS)) {
mctrl |= TIOCM_CTS;
}
- if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
+ if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR))
mctrl |= TIOCM_DSR;
- if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
+ if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD))
mctrl |= TIOCM_CAR;
return mctrl;
@@ -2493,14 +2532,16 @@
* center of the last stop bit in sampling clocks.
*/
int last_stop = bits * 2 - 1;
- int deviation = min_err * srr * last_stop / 2 / baud;
+ int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+ (int)(srr + 1),
+ 2 * (int)baud);
if (abs(deviation) >= 2) {
/* At least two sampling clocks off at the
* last stop bit; we can increase the error
* margin by shifting the sampling point.
*/
- int shift = min(-8, max(7, deviation / 2));
+ int shift = clamp(deviation / 2, -8, 7);
hssrr |= (shift << HSCIF_SRHP_SHIFT) &
HSCIF_SRHP_MASK;
@@ -2853,8 +2894,12 @@
port->mapbase = res->start;
sci_port->reg_size = resource_size(res);
- for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
- sci_port->irqs[i] = platform_get_irq(dev, i);
+ for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
+ if (i)
+ sci_port->irqs[i] = platform_get_irq_optional(dev, i);
+ else
+ sci_port->irqs[i] = platform_get_irq(dev, i);
+ }
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
@@ -3109,14 +3154,10 @@
sci_cleanup_single(port);
- if (port->port.fifosize > 1) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
- }
- if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_timeout.attr);
- }
+ if (port->port.fifosize > 1)
+ device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
+ device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);
return 0;
}
@@ -3244,14 +3285,12 @@
return ret;
sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
- if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
+ if (IS_ERR(sciport->gpios))
return PTR_ERR(sciport->gpios);
if (sciport->has_rtscts) {
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
- UART_GPIO_CTS)) ||
- !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
- UART_GPIO_RTS))) {
+ if (mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_CTS) ||
+ mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_RTS)) {
dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
return -EINVAL;
}
@@ -3304,19 +3343,17 @@
return ret;
if (sp->port.fifosize > 1) {
- ret = sysfs_create_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
+ ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_trigger);
if (ret)
return ret;
}
if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
sp->port.type == PORT_HSCIF) {
- ret = sysfs_create_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_timeout.attr);
+ ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_timeout);
if (ret) {
if (sp->port.fifosize > 1) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
+ device_remove_file(&dev->dev,
+ &dev_attr_rx_fifo_trigger);
}
return ret;
}
@@ -3414,6 +3451,12 @@
{
return early_console_setup(device, PORT_SCIF);
}
+static int __init rzscifa_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ port_cfg.regtype = SCIx_RZ_SCIFA_REGTYPE;
+ return early_console_setup(device, PORT_SCIF);
+}
static int __init scifa_early_console_setup(struct earlycon_device *device,
const char *opt)
{
@@ -3432,6 +3475,7 @@
OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
+OF_EARLYCON_DECLARE(scif, "renesas,scif-r7s9210", rzscifa_early_console_setup);
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);