Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ac05c9c..8c261ea 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -88,7 +88,7 @@
if (len) {
spi->driver_override = driver_override;
} else {
- /* Emptry string, disable driver override */
+ /* Empty string, disable driver override */
spi->driver_override = NULL;
kfree(driver_override);
}
@@ -466,7 +466,7 @@
static LIST_HEAD(spi_controller_list);
/*
- * Used to protect add/del opertion for board_info list and
+ * Used to protect add/del operation for board_info list and
* spi_controller list, and their matching process
* also used to protect object of type struct idr
*/
@@ -513,6 +513,7 @@
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
+ spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -792,25 +793,56 @@
/*-------------------------------------------------------------------------*/
-static void spi_set_cs(struct spi_device *spi, bool enable)
+static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
+ bool enable1 = enable;
+
+ /*
+ * Avoid calling into the driver (or doing delays) if the chip select
+ * isn't actually changing from the last time this was called.
+ */
+ if (!force && (spi->controller->last_cs_enable == enable) &&
+ (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ return;
+
+ spi->controller->last_cs_enable = enable;
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
+ if (!spi->controller->set_cs_timing) {
+ if (enable1)
+ spi_delay_exec(&spi->controller->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->controller->cs_hold, NULL);
+ }
+
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
- /*
- * Honour the SPI_NO_CS flag and invert the enable line, as
- * active low is default for SPI. Execution paths that handle
- * polarity inversion in gpiolib (such as device tree) will
- * enforce active high using the SPI_CS_HIGH resulting in a
- * double inversion through the code above.
- */
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod)
- gpiod_set_value_cansleep(spi->cs_gpiod,
- !enable);
- else
+ if (spi->cs_gpiod) {
+ /*
+ * Historically ACPI has no means of the GPIO polarity and
+ * thus the SPISerialBus() resource defines it on the per-chip
+ * basis. In order to avoid a chain of negations, the GPIO
+ * polarity is considered being Active High. Even for the cases
+ * when _DSD() is involved (in the updated versions of ACPI)
+ * the GPIO CS polarity must be defined Active High to avoid
+ * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+ * into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, enable1);
+ } else {
+ /*
+ * invert the enable line, as active low is
+ * default for SPI.
+ */
gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -819,6 +851,11 @@
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (!spi->controller->set_cs_timing) {
+ if (!enable1)
+ spi_delay_exec(&spi->controller->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
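To make the chip-select inversions in the reworked spi_set_cs() easier to follow, here is a worked trace of the legacy cs_gpio path, derived from the code in the hunk above:

/*
 * spi_set_cs(spi, true, ...),  SPI_CS_HIGH clear: gpio = !true  = 0 (asserted, active low)
 * spi_set_cs(spi, true, ...),  SPI_CS_HIGH set:   enable flips, gpio = !false = 1 (asserted, active high)
 * spi_set_cs(spi, false, ...), SPI_CS_HIGH clear: gpio = !false = 1 (deasserted)
 * spi_set_cs(spi, false, ...), SPI_CS_HIGH set:   enable flips, gpio = !true  = 0 (deasserted)
 */

GPIO descriptors instead receive the logical value enable1, since gpiolib applies the line polarity itself; only under ACPI, where descriptors are conventionally active high, is the inverted enable used, as the new comment explains.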
@@ -986,6 +1023,8 @@
spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
+ ctlr->cur_msg_mapped = false;
+
return 0;
}
#else /* !CONFIG_HAS_DMA */
@@ -1027,7 +1066,8 @@
void *tmp;
unsigned int max_tx, max_rx;
- if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
+ if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
+ && !(msg->spi->mode & SPI_3WIRE)) {
max_tx = 0;
max_rx = 0;
@@ -1079,7 +1119,8 @@
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
- unsigned long long ms = 1;
+ u32 speed_hz = xfer->speed_hz;
+ unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
@@ -1087,8 +1128,11 @@
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
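As a worked example of this timeout: a 4096-byte transfer with speed_hz unset now falls back to the 100 kHz floor, so ms = 8 * 1000 * 4096 / 100000 = 327, and ms += ms + 200 then yields an 854 ms completion timeout, avoiding the division by zero the old do_div(ms, xfer->speed_hz) risked when speed_hz was zero.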
@@ -1125,42 +1169,81 @@
}
}
-static void _spi_transfer_cs_change_delay(struct spi_message *msg,
- struct spi_transfer *xfer)
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
- u32 delay = xfer->cs_change_delay;
- u32 unit = xfer->cs_change_delay_unit;
+ u32 delay = _delay->value;
+ u32 unit = _delay->unit;
u32 hz;
- /* return early on "fast" mode - for everything but USECS */
- if (!delay && unit != SPI_DELAY_UNIT_USECS)
- return;
+ if (!delay)
+ return 0;
switch (unit) {
case SPI_DELAY_UNIT_USECS:
- /* for compatibility use default of 10us */
- if (!delay)
- delay = 10000;
- else
- delay *= 1000;
+ delay *= 1000;
break;
case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
+ /* clock cycles need to be obtained from spi_transfer */
+ if (!xfer)
+ return -EINVAL;
/* if there is no effective speed known, then approximate
* by underestimating with half the requested hz
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ if (!hz)
+ return -EINVAL;
delay *= DIV_ROUND_UP(1000000000, hz);
break;
default:
+ return -EINVAL;
+ }
+
+ return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+ int delay;
+
+ might_sleep();
+
+ if (!_delay)
+ return -EINVAL;
+
+ delay = spi_delay_to_ns(_delay, xfer);
+ if (delay < 0)
+ return delay;
+
+ _spi_transfer_delay_ns(delay);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
+
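These two helpers form the new public delay API: spi_delay_to_ns() converts a delay to nanoseconds, spi_delay_exec() converts and then waits. A minimal sketch of a caller, assuming a transfer whose clock rate is already known:

    struct spi_delay d = { .value = 4, .unit = SPI_DELAY_UNIT_SCK };
    int ns;

    /* 4 SCK periods; the xfer supplies effective_speed_hz (or speed_hz / 2) */
    ns = spi_delay_to_ns(&d, xfer);
    if (ns < 0)
            return ns;      /* -EINVAL when the unit or clock rate is unresolvable */

    /* or convert and wait in one step */
    spi_delay_exec(&d, xfer);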
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay.value;
+ u32 unit = xfer->cs_change_delay.unit;
+ int ret;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay) {
+ if (unit == SPI_DELAY_UNIT_USECS)
+ _spi_transfer_delay_ns(10000);
+ return;
+ }
+
+ ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+ if (ret) {
dev_err_once(&msg->spi->dev,
"Use of unsupported delay unit %i, using default of 10us\n",
- xfer->cs_change_delay_unit);
- delay = 10000;
+ unit);
+ _spi_transfer_delay_ns(10000);
}
- /* now sleep for the requested amount of time */
- _spi_transfer_delay_ns(delay);
}
/*
@@ -1179,7 +1262,7 @@
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
- spi_set_cs(msg->spi, true);
+ spi_set_cs(msg->spi, true, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
@@ -1190,11 +1273,25 @@
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
- if (xfer->tx_buf || xfer->rx_buf) {
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
+ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
reinit_completion(&ctlr->xfer_completion);
+fallback_pio:
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ if (ctlr->cur_msg_mapped &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ __spi_unmap_msg(ctlr, msg);
+ ctlr->fallback = true;
+ xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+ goto fallback_pio;
+ }
+
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
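The fallback_pio loop above lets the core retry a transfer in PIO mode when a driver's DMA setup never started. A hypothetical controller driver cooperating with it could look like the following sketch (the foo_* helpers are illustrative, not part of the patch):

    static int foo_transfer_one(struct spi_controller *ctlr,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
            if (foo_can_dma(xfer) && !ctlr->fallback) {
                    if (foo_start_dma(xfer)) {
                            /* DMA never started: ask the core to retry in PIO */
                            xfer->error |= SPI_TRANS_FAIL_NO_START;
                            return -EAGAIN;
                    }
                    return 1;       /* in progress, finalized from the DMA callback */
            }
            return foo_pio_transfer(xfer);
    }

The core then unmaps the message, sets ctlr->fallback (cleared again in spi_finalize_current_message(), see below) and calls transfer_one() again.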
@@ -1216,22 +1313,26 @@
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs)
- _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
+ spi_transfer_delay_exec(xfer);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
&msg->transfers)) {
keep_cs = true;
} else {
- spi_set_cs(msg->spi, false);
+ spi_set_cs(msg->spi, false, false);
_spi_transfer_cs_change_delay(msg, xfer);
- spi_set_cs(msg->spi, true);
+ spi_set_cs(msg->spi, true, false);
}
}
@@ -1240,7 +1341,7 @@
out:
if (ret != 0 || !keep_cs)
- spi_set_cs(msg->spi, false);
+ spi_set_cs(msg->spi, false, false);
if (msg->status == -EINPROGRESS)
msg->status = ret;
@@ -1267,6 +1368,14 @@
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+}
+
/**
* __spi_pump_messages - function which processes spi message queue
* @ctlr: controller to process queue for
@@ -1282,6 +1391,7 @@
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
+ struct spi_transfer *xfer;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
@@ -1298,7 +1408,7 @@
/* If another context is idling the device then defer */
if (ctlr->idling) {
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1310,10 +1420,17 @@
return;
}
- /* Only do teardown in the thread */
+ /* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
- &ctlr->pump_messages);
+ if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+ !ctlr->unprepare_transfer_hardware) {
+ spi_idle_runtime_pm(ctlr);
+ ctlr->busy = false;
+ trace_spi_controller_idle(ctlr);
+ } else {
+ kthread_queue_work(ctlr->kworker,
+ &ctlr->pump_messages);
+ }
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1330,10 +1447,7 @@
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
- if (ctlr->auto_runtime_pm) {
- pm_runtime_mark_last_busy(ctlr->dev.parent);
- pm_runtime_put_autosuspend(ctlr->dev.parent);
- }
+ spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
@@ -1408,6 +1522,13 @@
goto out;
}
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
@@ -1436,6 +1557,91 @@
}
/**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. The frequency with which this function
+ * must be called (once per word, once for the whole
+ * transfer, once per batch of words etc) is arbitrary
+ * as long as the @tx buffer offset is greater than or
+ * equal to the requested byte at the time of the
+ * call. The timestamp is only taken once, at the
+ * first such call. It is assumed that the driver
+ * advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, must follow up with
+ * spi_take_timestamp_post or otherwise system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress > xfer->ptp_sts_word_pre)
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = progress;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds
+ * or is equal to the requested word will be
+ * timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off)
+{
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped)
+ return;
+
+ if (progress < xfer->ptp_sts_word_post)
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = progress;
+
+ xfer->timestamped = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
+
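Taken together, the intended call pattern for these helpers in a PIO driver's transmit loop is roughly the following sketch, where w_size (bytes per word) and foo_push_word() are hypothetical:

    size_t i;

    for (i = 0; i < xfer->len / w_size; i++) {
            /* latches the "pre" system timestamp once, at the requested word */
            spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
            foo_push_word(priv, xfer->tx_buf, i);
            /* latches "post" on the first call at or past the requested word */
            spi_take_timestamp_post(ctlr, xfer, i, irqs_off);
    }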
+/**
* spi_set_thread_rt - set the controller to pump at realtime priority
* @ctlr: controller to boost priority of
*
@@ -1452,11 +1658,9 @@
*/
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
-
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+ sched_set_fifo(ctlr->kworker->task);
}
static int spi_init_queue(struct spi_controller *ctlr)
@@ -1464,13 +1668,12 @@
ctlr->running = false;
ctlr->busy = false;
- kthread_init_worker(&ctlr->kworker);
- ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
- "%s", dev_name(&ctlr->dev));
- if (IS_ERR(ctlr->kworker_task)) {
- dev_err(&ctlr->dev, "failed to create message pump task\n");
- return PTR_ERR(ctlr->kworker_task);
+ ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
}
+
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
@@ -1520,6 +1723,7 @@
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
unsigned long flags;
int ret;
@@ -1528,6 +1732,17 @@
mesg = ctlr->cur_msg;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
+
spi_unmap_msg(ctlr, mesg);
/* In the prepare_messages callback the spi bus has the opportunity to
@@ -1548,7 +1763,8 @@
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->cur_msg = NULL;
ctlr->cur_msg_prepared = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ ctlr->fallback = false;
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1574,7 +1790,7 @@
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
@@ -1630,8 +1846,7 @@
return ret;
}
- kthread_flush_worker(&ctlr->kworker);
- kthread_stop(ctlr->kworker_task);
+ kthread_destroy_worker(ctlr->kworker);
return 0;
}
@@ -1654,7 +1869,7 @@
list_add_tail(&msg->queue, &ctlr->queue);
if (!ctlr->busy && need_pump)
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
@@ -1799,23 +2014,9 @@
}
spi->chip_select = value;
- /*
- * For descriptors associated with the device, polarity inversion is
- * handled in the gpiolib, so all gpio chip selects are "active high"
- * in the logical sense, the gpiolib will invert the line if need be.
- */
- if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
- ctlr->cs_gpiods[spi->chip_select])
- spi->mode |= SPI_CS_HIGH;
-
/* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&ctlr->dev,
- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
- return rc;
- }
- spi->max_speed_hz = value;
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
return 0;
}
@@ -2037,9 +2238,10 @@
return AE_NO_MEMORY;
}
+
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
- spi->mode = lookup.mode;
+ spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi->chip_select = lookup.chip_select;
@@ -2358,6 +2560,8 @@
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2399,7 +2603,23 @@
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
}
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
}
return 0;
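Worked example for the mask logic above: with three chip selects where only index 1 has a valid GPIO descriptor, the two holes give native_cs_mask = BIT(0) | BIT(2) = 0b101, so ctlr->unused_native_cs = ffs(~0b101) - 1 = 1, i.e. native line 1 is the one left free for controllers that need both a GPIO CS and a native slave select.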
@@ -2955,10 +3175,11 @@
/* add to list */
list_add(&xfer->transfer_list, rxfer->replaced_after);
- /* clear cs_change and delay_usecs for all but the last */
+ /* clear cs_change and delay for all but the last */
if (i) {
xfer->cs_change = false;
xfer->delay_usecs = 0;
+ xfer->delay.value = 0;
}
}
@@ -3172,10 +3393,37 @@
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->controller->max_speed_hz;
+ mutex_lock(&spi->controller->io_mutex);
+
if (spi->controller->setup)
status = spi->controller->setup(spi);
- spi_set_cs(spi, false);
+ if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ status = pm_runtime_get_sync(spi->controller->dev.parent);
+ if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
+ pm_runtime_put_noidle(spi->controller->dev.parent);
+ dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+ status);
+ return status;
+ }
+
+ /*
+ * We do not want to return positive value from pm_runtime_get,
+ * there are many instances of devices calling spi_setup() and
+ * checking for a non-zero return value instead of a negative
+ * return value.
+ */
+ status = 0;
+
+ spi_set_cs(spi, false, true);
+ pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ } else {
+ spi_set_cs(spi, false, true);
+ }
+
+ mutex_unlock(&spi->controller->io_mutex);
if (spi->rt && !spi->controller->rt) {
spi->controller->rt = true;
@@ -3198,18 +3446,71 @@
/**
* spi_set_cs_timing - configure CS setup, hold, and inactive delays
* @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time in terms of clock count
- * @hold: CS hold time in terms of clock count
- * @inactive_dly: CS inactive delay between transfers in terms of clock count
+ * @setup: CS setup time specified via @spi_delay
+ * @hold: CS hold time specified via @spi_delay
+ * @inactive: CS inactive delay between transfers specified via @spi_delay
+ *
+ * Return: zero on success, else a negative error code.
*/
-void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
- u8 inactive_dly)
+int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive)
{
+ size_t len;
+
if (spi->controller->set_cs_timing)
- spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+ return spi->controller->set_cs_timing(spi, setup, hold,
+ inactive);
+
+ if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+ (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
+ (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
+ dev_err(&spi->dev,
+ "Clock-cycle delays for CS not supported in SW mode\n");
+ return -ENOTSUPP;
+ }
+
+ len = sizeof(struct spi_delay);
+
+ /* copy delays to controller */
+ if (setup)
+ memcpy(&spi->controller->cs_setup, setup, len);
+ else
+ memset(&spi->controller->cs_setup, 0, len);
+
+ if (hold)
+ memcpy(&spi->controller->cs_hold, hold, len);
+ else
+ memset(&spi->controller->cs_hold, 0, len);
+
+ if (inactive)
+ memcpy(&spi->controller->cs_inactive, inactive, len);
+ else
+ memset(&spi->controller->cs_inactive, 0, len);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(spi_set_cs_timing);
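With the new signature, a client configures chip-select timing by passing struct spi_delay values instead of raw clock counts; a minimal sketch, with illustrative numbers:

    struct spi_delay setup    = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
    struct spi_delay hold     = { .value = 50, .unit = SPI_DELAY_UNIT_NSECS };
    struct spi_delay inactive = { .value = 2,  .unit = SPI_DELAY_UNIT_USECS };
    int ret;

    ret = spi_set_cs_timing(spi, &setup, &hold, &inactive);
    if (ret)
            return ret;     /* e.g. -ENOTSUPP for SCK units without set_cs_timing */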
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+ struct spi_device *spi)
+{
+ int delay1, delay2;
+
+ delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (delay1 < 0)
+ return delay1;
+
+ delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (delay2 < 0)
+ return delay2;
+
+ if (delay1 < delay2)
+ memcpy(&xfer->word_delay, &spi->word_delay,
+ sizeof(xfer->word_delay));
+
+ return 0;
+}
+
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
@@ -3345,8 +3646,8 @@
return -EINVAL;
}
- if (xfer->word_delay_usecs < spi->word_delay_usecs)
- xfer->word_delay_usecs = spi->word_delay_usecs;
+ if (_spi_xfer_word_delay_update(xfer, spi))
+ return -EINVAL;
}
message->status = -EINPROGRESS;
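As an example of the word-delay check above: with xfer->word_delay = {100, SPI_DELAY_UNIT_NSECS} and spi->word_delay = {2, SPI_DELAY_UNIT_USECS}, delay1 = 100 ns is less than delay2 = 2000 ns, so the per-device delay overwrites the per-transfer one; if either delay cannot be resolved (negative return from spi_delay_to_ns()), __spi_validate() now rejects the whole message with -EINVAL.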
@@ -3357,6 +3658,7 @@
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3372,6 +3674,13 @@
trace_spi_message_submit(message);
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
return ctlr->transfer(spi, message);
}
@@ -3675,8 +3984,7 @@
* is zero for success, else a negative errno status code.
* This call may only be used from a context that may sleep.
*
- * Parameters to this routine are always copied using a small buffer;
- * portable code should never use this for more than 32 bytes.
+ * Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
*
@@ -3848,7 +4156,7 @@
struct device *dev;
dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
- return dev ? to_spi_device(dev) : NULL;
+ return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,