Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1eb81f1..83e26fd 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -270,7 +270,7 @@
static int __init parse_core(struct device_node *core, int package_id,
int core_id)
{
- char name[10];
+ char name[20];
bool leaf = true;
int i = 0;
int cpu;
@@ -317,7 +317,7 @@
static int __init parse_cluster(struct device_node *cluster, int depth)
{
- char name[10];
+ char name[20];
bool leaf = true;
bool has_cores = false;
struct device_node *c;
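The two arch_topology.c hunks above widen the scratch buffer because names built as "thread%d", "core%d" and "cluster%d" can exceed ten bytes for large indices and trip -Wformat-truncation. A minimal userspace sketch of that sizing issue (the index value and buffer sizes below are illustrative assumptions, not taken from the kernel):

#include <stdio.h>

int main(void)
{
	char small[10], big[20];
	unsigned int id = 4294967295u;	/* hypothetical worst-case index */
	int need;

	need = snprintf(small, sizeof(small), "cluster%u", id);
	printf("\"%s\": needed %d bytes, had %zu\n", small, need + 1, sizeof(small));

	need = snprintf(big, sizeof(big), "cluster%u", id);
	printf("\"%s\": needed %d bytes, had %zu\n", big, need + 1, sizeof(big));

	return 0;
}

Compiling a pattern like the first call with -Wformat-truncation typically reproduces the warning that the larger buffer silences.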
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 532a3a5..b9f20ad 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -102,11 +102,11 @@
seq_printf(s, "%-40s %20s\n", "device name", "status");
seq_puts(s, "-------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
- struct device *d = (struct device *)match->compare[i].data;
+ struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n", dev_name(d),
- match->compare[i].component ?
- "registered" : "not registered");
+ seq_printf(s, "%-40s %20s\n",
+ component ? dev_name(component->dev) : "(unknown)",
+ component ? (component->bound ? "bound" : "not bound") : "not registered");
}
mutex_unlock(&component_mutex);
@@ -257,7 +257,8 @@
ret = master->ops->bind(master->dev);
if (ret < 0) {
devres_release_group(master->dev, NULL);
- dev_info(master->dev, "master bind failed: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_info(master->dev, "master bind failed: %d\n", ret);
return ret;
}
@@ -611,8 +612,9 @@
devres_release_group(component->dev, NULL);
devres_release_group(master->dev, NULL);
- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
- dev_name(component->dev), component->ops, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
}
return ret;
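Both component.c logging hunks apply the same policy: -EPROBE_DEFER means the bind will be retried later, so it is not worth a log line. A small userspace sketch of that policy (the helper and device names are hypothetical; only the EPROBE_DEFER value of 517 matches the kernel's errno definitions):

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* value used by include/linux/errno.h */

static void report_bind_result(const char *name, int ret)
{
	if (ret == 0 || ret == -EPROBE_DEFER)
		return;		/* success, or deferred and retried later: stay quiet */
	fprintf(stderr, "%s: bind failed: %d\n", name, ret);
}

int main(void)
{
	report_bind_result("consumer-a", 0);			/* silent */
	report_bind_result("consumer-b", -EPROBE_DEFER);	/* silent */
	report_bind_result("consumer-c", -ENODEV);		/* logged */
	return 0;
}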
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bd9cd3..8b651bf 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -106,6 +106,16 @@
#endif
#endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -119,7 +129,12 @@
struct device_link *link;
int ret;
- if (dev == target)
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
@@ -454,8 +469,7 @@
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
@@ -469,8 +483,7 @@
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del(&link->s_node);
list_del(&link->c_node);
@@ -1709,6 +1722,7 @@
device_pm_init(dev);
set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
+ raw_spin_lock_init(&dev->msi_lock);
INIT_LIST_HEAD(&dev->msi_list);
#endif
INIT_LIST_HEAD(&dev->links.consumers);
@@ -3400,9 +3414,10 @@
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
- if (fwnode) {
- struct fwnode_handle *fn = dev->fwnode;
+ struct device *parent = dev->parent;
+ struct fwnode_handle *fn = dev->fwnode;
+ if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
@@ -3412,8 +3427,13 @@
}
dev->fwnode = fwnode;
} else {
- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
- dev->fwnode->secondary : NULL;
+ if (fwnode_is_primary(fn)) {
+ dev->fwnode = fn->secondary;
+ if (!(parent && fn == parent->fwnode))
+ fn->secondary = NULL;
+ } else {
+ dev->fwnode = NULL;
+ }
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6265871..f00da44 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -567,6 +567,12 @@
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_srbds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -575,6 +581,7 @@
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -585,6 +592,7 @@
&dev_attr_mds.attr,
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
+ &dev_attr_srbds.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d811e60..cf7e5b4 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -300,14 +300,16 @@
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
- struct device_private *private, *p;
+ struct device_private *p;
deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
- list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending");
+ mutex_lock(&deferred_probe_mutex);
+ list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(p->device, "deferred probe pending\n");
+ mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -516,7 +518,11 @@
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
- WARN_ON(!list_empty(&dev->devres_head));
+ if (!list_empty(&dev->devres_head)) {
+ dev_crit(dev, "Resources present before probing\n");
+ ret = -EBUSY;
+ goto done;
+ }
re_probe:
dev->driver = drv;
@@ -636,7 +642,7 @@
ret = 0;
done:
atomic_dec(&probe_count);
- wake_up(&probe_waitqueue);
+ wake_up_all(&probe_waitqueue);
return ret;
}
@@ -869,7 +875,9 @@
int ret = 0;
device_lock(dev);
- if (dev->driver) {
+ if (dev->p->dead) {
+ goto out_unlock;
+ } else if (dev->driver) {
if (device_is_bound(dev)) {
ret = 1;
goto out_unlock;
@@ -1099,6 +1107,8 @@
drv = dev->driver;
if (drv) {
+ pm_runtime_get_sync(dev);
+
while (device_links_busy(dev)) {
__device_driver_unlock(dev, parent);
@@ -1110,13 +1120,12 @@
* have released the driver successfully while this one
* was waiting, so check for that.
*/
- if (dev->driver != drv)
+ if (dev->driver != drv) {
+ pm_runtime_put(dev);
return;
+ }
}
- pm_runtime_get_sync(dev);
- pm_runtime_clean_up_links(dev);
-
driver_sysfs_remove(dev);
if (dev->bus)
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 37e5ae3..5fa7ce3 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -8,7 +8,8 @@
obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
-FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
+comma := ,
+FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
@@ -16,7 +17,7 @@
filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
- echo " .p2align $(ASM_ALIGN)" ;\
+ echo " .p2align 4" ;\
echo "_fw_$(FWSTR)_bin:" ;\
echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
echo "_fw_end:" ;\
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 62ee90b..5f3e5d8 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -86,12 +86,11 @@
{
/*
* There is a small window in which user can write to 'loading'
- * between loading done and disappearance of 'loading'
+ * between loading done/aborted and disappearance of 'loading'
*/
- if (fw_sysfs_done(fw_priv))
+ if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
return;
- list_del_init(&fw_priv->pending_list);
fw_state_aborted(fw_priv);
}
@@ -277,7 +276,6 @@
* Same logic as fw_load_abort, only the DONE bit
* is ignored and we set ABORT only on failure.
*/
- list_del_init(&fw_priv->pending_list);
if (rc) {
fw_state_aborted(fw_priv);
written = rc;
@@ -512,6 +510,11 @@
}
mutex_lock(&fw_lock);
+ if (fw_state_is_aborted(fw_priv)) {
+ mutex_unlock(&fw_lock);
+ retval = -EINTR;
+ goto out;
+ }
list_add(&fw_priv->pending_list, &pending_fw_head);
mutex_unlock(&fw_lock);
@@ -525,7 +528,7 @@
}
retval = fw_sysfs_wait_timeout(fw_priv, timeout);
- if (retval < 0) {
+ if (retval < 0 && retval != -ENOENT) {
mutex_lock(&fw_lock);
fw_load_abort(fw_sysfs);
mutex_unlock(&fw_lock);
@@ -534,11 +537,10 @@
if (fw_state_is_aborted(fw_priv)) {
if (retval == -ERESTARTSYS)
retval = -EINTR;
- else
- retval = -EAGAIN;
} else if (fw_priv->is_paged_buf && !fw_priv->data)
retval = -ENOMEM;
+out:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7ecd590..ffb2a27 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -108,8 +108,16 @@
WRITE_ONCE(fw_st->status, status);
- if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ /*
+ * Doing this here ensures that the fw_priv is deleted from
+ * the pending list in all abort/done paths.
+ */
+ list_del_init(&fw_priv->pending_list);
+#endif
complete_all(&fw_st->completion);
+ }
}
static inline void fw_state_aborted(struct fw_priv *fw_priv)
@@ -139,10 +147,12 @@
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
+bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
#endif /* __FIRMWARE_LOADER_H */
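Moving list_del_init() into __fw_state_set() works because deleting a self-linked list node is a harmless no-op, so every done/aborted path can take the entry off the pending list unconditionally. A self-contained userspace sketch of that property, using a stripped-down reimplementation of the list helpers rather than the real <linux/list.h>:

#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head;
	head->next = head;
}

static void list_add(struct list_head *node, struct list_head *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

static void list_del_init(struct list_head *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
	INIT_LIST_HEAD(node);		/* self-linked again: deleting twice is safe */
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head pending, entry;

	INIT_LIST_HEAD(&pending);
	INIT_LIST_HEAD(&entry);

	list_add(&entry, &pending);
	list_del_init(&entry);		/* removal in the "done" path */
	list_del_init(&entry);		/* a second removal is a harmless no-op */

	printf("pending list empty: %d\n", list_empty(&pending));	/* prints 1 */
	return 0;
}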
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bf44c79..249349f 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -252,9 +252,11 @@
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
- fw_free_paged_buf(fw_priv); /* free leftover pages */
- if (!fw_priv->allocated_size)
+ if (fw_is_paged_buf(fw_priv))
+ fw_free_paged_buf(fw_priv);
+ else if (!fw_priv->allocated_size)
vfree(fw_priv->data);
+
kfree_const(fw_priv->fw_name);
kfree(fw_priv);
}
@@ -268,6 +270,11 @@
}
#ifdef CONFIG_FW_LOADER_PAGED_BUF
+bool fw_is_paged_buf(struct fw_priv *fw_priv)
+{
+ return fw_priv->is_paged_buf;
+}
+
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
int i;
@@ -275,6 +282,8 @@
if (!fw_priv->pages)
return;
+ vunmap(fw_priv->data);
+
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kvfree(fw_priv->pages);
@@ -328,10 +337,6 @@
if (!fw_priv->data)
return -ENOMEM;
- /* page table is no longer needed after mapping, let's free */
- kvfree(fw_priv->pages);
- fw_priv->pages = NULL;
-
return 0;
}
#endif
@@ -742,8 +747,10 @@
return;
fw_priv = fw->priv;
+ mutex_lock(&fw_lock);
if (!fw_state_is_aborted(fw_priv))
fw_state_aborted(fw_priv);
+ mutex_unlock(&fw_lock);
}
/* called from request_firmware() and request_firmware_work_func() */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 84c4e1f..5a8c430 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -114,30 +114,13 @@
}
/*
- * Show whether the memory block is likely to be offlineable (or is already
- * offline). Once offline, the memory block could be removed. The return
- * value does, however, not indicate that there is a way to remove the
- * memory block.
+ * Legacy interface that we cannot remove. Always indicate "removable"
+ * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
*/
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct memory_block *mem = to_memory_block(dev);
- unsigned long pfn;
- int ret = 1, i;
-
- if (mem->state != MEM_ONLINE)
- goto out;
-
- for (i = 0; i < sections_per_block; i++) {
- if (!present_section_nr(mem->start_section_nr + i))
- continue;
- pfn = section_nr_to_pfn(mem->start_section_nr + i);
- ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
- }
-
-out:
- return sprintf(buf, "%d\n", ret);
+ return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}
/*
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 296546f..c9976dc 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -262,21 +262,20 @@
if (!dev)
return;
+ device_initialize(dev);
dev->parent = &node->dev;
dev->release = node_cache_release;
if (dev_set_name(dev, "memory_side_cache"))
- goto free_dev;
+ goto put_device;
- if (device_register(dev))
- goto free_name;
+ if (device_add(dev))
+ goto put_device;
pm_runtime_no_callbacks(dev);
node->cache_dev = dev;
return;
-free_name:
- kfree_const(dev->kobj.name);
-free_dev:
- kfree(dev);
+put_device:
+ put_device(dev);
}
/**
@@ -313,25 +312,24 @@
return;
dev = &info->dev;
+ device_initialize(dev);
dev->parent = node->cache_dev;
dev->release = node_cacheinfo_release;
dev->groups = cache_groups;
if (dev_set_name(dev, "index%d", cache_attrs->level))
- goto free_cache;
+ goto put_device;
info->cache_attrs = *cache_attrs;
- if (device_register(dev)) {
+ if (device_add(dev)) {
dev_warn(&node->dev, "failed to add cache level:%d\n",
cache_attrs->level);
- goto free_name;
+ goto put_device;
}
pm_runtime_no_callbacks(dev);
list_add_tail(&info->node, &node->cache_attrs);
return;
-free_name:
- kfree_const(dev->kobj.name);
-free_cache:
- kfree(info);
+put_device:
+ put_device(dev);
}
static void node_remove_caches(struct node *node)
@@ -758,14 +756,36 @@
return pfn_to_nid(pfn);
}
+static int do_register_memory_block_under_node(int nid,
+ struct memory_block *mem_blk)
+{
+ int ret;
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node.
+ */
+ mem_blk->nid = nid;
+
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
+}
+
/* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
- void *arg)
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+ void *arg)
{
unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int ret, nid = *(int *)arg;
+ int nid = *(int *)arg;
unsigned long pfn;
for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
@@ -782,39 +802,34 @@
}
/*
- * We need to check if page belongs to nid only for the boot
- * case, during hotplug we know that all pages in the memory
- * block belong to the same node.
+ * We need to check if page belongs to nid only at the boot
+ * case because node's ranges can be interleaved.
*/
- if (system_state == SYSTEM_BOOTING) {
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
- }
+ page_nid = get_nid_for_pfn(pfn);
+ if (page_nid < 0)
+ continue;
+ if (page_nid != nid)
+ continue;
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
- mem_blk->nid = nid;
-
- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
- &mem_blk->dev.kobj,
- kobject_name(&mem_blk->dev.kobj));
- if (ret)
- return ret;
-
- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
- &node_devices[nid]->dev.kobj,
- kobject_name(&node_devices[nid]->dev.kobj));
+ return do_register_memory_block_under_node(nid, mem_blk);
}
/* mem section does not span the specified node */
return 0;
}
/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+ void *arg)
+{
+ int nid = *(int *)arg;
+
+ return do_register_memory_block_under_node(nid, mem_blk);
+}
+
+/*
* Unregister a memory block device under the node it spans. Memory blocks
* with multiple nodes cannot be offlined and therefore also never be removed.
*/
@@ -829,11 +844,19 @@
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+ enum meminit_context context)
{
+ walk_memory_blocks_func_t func;
+
+ if (context == MEMINIT_HOTPLUG)
+ func = register_mem_block_under_node_hotplug;
+ else
+ func = register_mem_block_under_node_early;
+
return walk_memory_blocks(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
- register_mem_sect_under_node);
+ func);
}
#ifdef CONFIG_HUGETLBFS
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3c0cd20..0b67d41 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -27,6 +27,7 @@
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
+#include <linux/types.h>
#include "base.h"
#include "power/power.h"
@@ -48,7 +49,7 @@
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -226,7 +227,7 @@
unsigned int type,
const char *name)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -334,10 +335,10 @@
{
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- if (!pdev->dma_mask)
- pdev->dma_mask = DMA_BIT_MASK(32);
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dma_mask;
+ if (!pdev->dev.dma_mask) {
+ pdev->platform_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
+ }
};
/**
@@ -473,7 +474,8 @@
*/
int platform_device_add(struct platform_device *pdev)
{
- int i, ret;
+ u32 i;
+ int ret;
if (!pdev)
return -EINVAL;
@@ -541,7 +543,7 @@
pdev->id = PLATFORM_DEVID_AUTO;
}
- while (--i >= 0) {
+ while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
@@ -562,7 +564,7 @@
*/
void platform_device_del(struct platform_device *pdev)
{
- int i;
+ u32 i;
if (!IS_ERR_OR_NULL(pdev)) {
device_del(&pdev->dev);
@@ -632,20 +634,8 @@
pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
- /*
- * This memory isn't freed when the device is put,
- * I don't have a nice idea for that though. Conceptually
- * dma_mask in struct device should not be a pointer.
- * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
- */
- pdev->dev.dma_mask =
- kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
- if (!pdev->dev.dma_mask)
- goto err;
-
- kmemleak_ignore(pdev->dev.dma_mask);
-
- *pdev->dev.dma_mask = pdevinfo->dma_mask;
+ pdev->platform_dma_mask = pdevinfo->dma_mask;
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
@@ -670,7 +660,6 @@
if (ret) {
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
- kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}
@@ -813,6 +802,8 @@
/* temporary section violation during probe() */
drv->probe = probe;
retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
/*
* Fixup that section violation, being paranoid about code scanning
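The platform.c hunks switch the resource index to u32, which is why the error unwind becomes "while (i--)": with an unsigned counter, "while (--i >= 0)" never terminates. A userspace sketch of that unwind pattern (claim(), release() and the failing slot are made up for illustration):

#include <stdio.h>

#define NUM_SLOTS 4

static int claim(unsigned int i)
{
	printf("claim   %u\n", i);
	return i == 2 ? -1 : 0;		/* pretend the third slot is busy */
}

static void release(unsigned int i)
{
	printf("release %u\n", i);
}

int main(void)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < NUM_SLOTS; i++) {
		ret = claim(i);
		if (ret)
			break;		/* i is left at the slot that failed */
	}

	if (ret) {
		/* Unwind what was claimed so far; correct even when i == 0. */
		while (i--)
			release(i);
	}
	return ret ? 1 : 0;
}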
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cc85e87..8428d02 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2615,7 +2615,7 @@
ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
if (ret <= 0)
- return ret;
+ return ret == -ENOENT ? 0 : ret;
/* Loop over the phandles until all the requested entry is found */
of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 134a8af..23af545 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -273,10 +273,38 @@
device_links_read_unlock(idx);
}
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
{
- dpm_wait(dev->parent, async);
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -621,7 +649,8 @@
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
skip_resume = dev_pm_may_skip_resume(dev);
@@ -697,7 +726,7 @@
if (is_async(dev)) {
get_device(dev);
- async_schedule(func, dev);
+ async_schedule_dev(func, dev);
return true;
}
@@ -829,7 +858,8 @@
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
callback = dpm_subsys_resume_early_cb(dev, state, &info);
@@ -944,7 +974,9 @@
goto Complete;
}
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1696,13 +1728,17 @@
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 48616f3..8fbd376 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -291,8 +291,7 @@
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
- READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -306,20 +305,38 @@
return 0;
}
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
- continue;
while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_put_noidle(link->supplier);
+
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
}
}
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -347,8 +364,10 @@
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
- if (retval)
+ if (retval) {
+ rpm_put_suppliers(dev);
goto fail;
+ }
device_links_read_unlock(idx);
}
@@ -371,9 +390,9 @@
|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
- fail:
- rpm_put_suppliers(dev);
+ __rpm_put_suppliers(dev, false);
+fail:
device_links_read_unlock(idx);
}
@@ -647,8 +666,11 @@
goto out;
}
+ if (dev->power.irq_safe)
+ goto out;
+
/* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
@@ -657,6 +679,14 @@
spin_lock(&dev->power.lock);
}
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
out:
trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1580,6 +1610,7 @@
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
+ dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1619,42 +1650,6 @@
}
/**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
- struct device_link *link;
- int idx;
-
- idx = device_links_read_lock();
-
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
- device_links_read_lock_held()) {
- if (!(link->flags & DL_FLAG_MANAGED))
- continue;
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put_noidle(dev);
- }
-
- device_links_read_unlock(idx);
-}
-
-/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
@@ -1669,8 +1664,8 @@
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1683,6 +1678,8 @@
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1691,7 +1688,11 @@
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
@@ -1705,7 +1706,7 @@
spin_unlock_irq(&dev->power.lock);
}
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
@@ -1713,6 +1714,25 @@
spin_unlock_irq(&dev->power.lock);
}
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM runtime reference to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+
+ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+}
+
static bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
@@ -1758,10 +1778,12 @@
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
- if (pm_runtime_need_not_resume(dev))
+ if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
- else
+ } else {
__update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = 1;
+ }
return 0;
@@ -1788,7 +1810,7 @@
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+ if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
@@ -1807,6 +1829,7 @@
pm_runtime_mark_last_busy(dev);
out:
+ dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 977d27b..9a9dec6 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
+#include <linux/init.h>
#include <linux/mc146818rtc.h>
@@ -165,6 +166,9 @@
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
+ if (!x86_platform.legacy.rtc)
+ return;
+
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@
static int early_resume_init(void)
{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
@@ -277,6 +284,9 @@
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5817b51..92f0960 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -241,7 +241,9 @@
{
if (ws) {
wakeup_source_remove(ws);
- wakeup_source_sysfs_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
wakeup_source_destroy(ws);
}
}
@@ -1071,6 +1073,9 @@
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 3d80c4b..d7c01b7 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -259,7 +259,7 @@
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len);
+ const void *val, size_t val_len, bool noinc);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index a93cafd..7f4b3b6 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -717,7 +717,7 @@
map->cache_bypass = true;
- ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
if (ret)
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
base, cur - map->reg_stride, ret);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index e72843f..4f2ff1b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -227,6 +227,9 @@
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -371,6 +374,9 @@
if (*ppos < 0 || !count)
return -EINVAL;
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -457,29 +463,31 @@
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
- ssize_t result;
- bool was_enabled, require_sync = false;
+ bool new_val, require_sync = false;
int err;
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data like debugfs_write_file_bool() */
+ if (err)
+ return count;
+
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
+
map->lock(map->lock_arg);
- was_enabled = map->cache_only;
-
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0) {
- map->unlock(map->lock_arg);
- return result;
- }
-
- if (map->cache_only && !was_enabled) {
+ if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_only && was_enabled) {
+ } else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
+ map->cache_only = new_val;
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -487,7 +495,7 @@
dev_err(map->dev, "Failed to sync cache %d\n", err);
}
- return result;
+ return count;
}
static const struct file_operations regmap_cache_only_fops = {
@@ -502,28 +510,32 @@
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
- ssize_t result;
- bool was_enabled;
+ bool new_val;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malformed data like debugfs_write_file_bool() */
+ if (err)
+ return count;
+
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
map->lock(map->lock_arg);
- was_enabled = map->cache_bypass;
-
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0)
- goto out;
-
- if (map->cache_bypass && !was_enabled) {
+ if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_bypass && was_enabled) {
+ } else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
+ map->cache_bypass = new_val;
-out:
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
- return result;
+ return count;
}
static const struct file_operations regmap_cache_bypass_fops = {
@@ -571,8 +583,12 @@
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -580,9 +596,10 @@
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
@@ -644,6 +661,7 @@
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
+ map->debugfs_name = NULL;
} else {
struct regmap_debugfs_node *node, *tmp;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 50a6638..e75168b 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -12,7 +12,7 @@
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- return sdw_write(slave, reg, val);
+ return sdw_write_no_pm(slave, reg, val);
}
static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
@@ -21,7 +21,7 @@
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read;
- read = sdw_read(slave, reg);
+ read = sdw_read_no_pm(slave, reg);
if (read < 0)
return read;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 19f57cc..43c0452 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -249,22 +250,20 @@
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val << shift);
+ put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
- __le16 *b = buf;
-
- b[0] = cpu_to_le16(val << shift);
+ put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u16 *)buf = val << shift;
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val << shift);
+ put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
- __le32 *b = buf;
-
- b[0] = cpu_to_le32(val << shift);
+ put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u32 *)buf = val << shift;
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
- __be64 *b = buf;
-
- b[0] = cpu_to_be64((u64)val << shift);
+ put_unaligned_be64((u64) val << shift, buf);
}
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
- __le64 *b = buf;
-
- b[0] = cpu_to_le64((u64)val << shift);
+ put_unaligned_le64((u64) val << shift, buf);
}
static void regmap_format_64_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u64 *)buf = (u64)val << shift;
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#endif
@@ -333,35 +328,34 @@
static unsigned int regmap_parse_16_be(const void *buf)
{
- const __be16 *b = buf;
-
- return be16_to_cpu(b[0]);
+ return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
- const __le16 *b = buf;
-
- return le16_to_cpu(b[0]);
+ return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
- __be16 *b = buf;
+ u16 v = get_unaligned_be16(buf);
- b[0] = be16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
- __le16 *b = buf;
+ u16 v = get_unaligned_le16(buf);
- b[0] = le16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
- return *(u16 *)buf;
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@
static unsigned int regmap_parse_32_be(const void *buf)
{
- const __be32 *b = buf;
-
- return be32_to_cpu(b[0]);
+ return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
- const __le32 *b = buf;
-
- return le32_to_cpu(b[0]);
+ return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
- __be32 *b = buf;
+ u32 v = get_unaligned_be32(buf);
- b[0] = be32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
- __le32 *b = buf;
+ u32 v = get_unaligned_le32(buf);
- b[0] = le32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
- return *(u32 *)buf;
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
- const __be64 *b = buf;
-
- return be64_to_cpu(b[0]);
+ return get_unaligned_be64(buf);
}
static unsigned int regmap_parse_64_le(const void *buf)
{
- const __le64 *b = buf;
-
- return le64_to_cpu(b[0]);
+ return get_unaligned_le64(buf);
}
static void regmap_parse_64_be_inplace(void *buf)
{
- __be64 *b = buf;
+ u64 v = get_unaligned_be64(buf);
- b[0] = be64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_64_le_inplace(void *buf)
{
- __le64 *b = buf;
+ u64 v = get_unaligned_le64(buf);
- b[0] = le64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_64_native(const void *buf)
{
- return *(u64 *)buf;
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#endif
@@ -1356,6 +1348,7 @@
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1370,7 +1363,7 @@
/* If the user didn't specify a name match any */
if (data)
- return (*r)->name == data;
+ return !strcmp((*r)->name, data);
else
return 1;
}
@@ -1475,7 +1468,7 @@
}
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+ const void *val, size_t val_len, bool noinc)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -1488,11 +1481,18 @@
WARN_ON(!map->bus);
- /* Check for unwritable registers before we start */
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_writeable(map,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
@@ -1505,7 +1505,7 @@
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
- reg + i, ret);
+ reg + regmap_get_offset(map, i), ret);
return ret;
}
}
@@ -1527,7 +1527,7 @@
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write_impl(map, reg, val,
win_residue *
- map->format.val_bytes);
+ map->format.val_bytes, noinc);
if (ret != 0)
return ret;
@@ -1541,7 +1541,7 @@
win_residue = range->window_len - win_offset;
}
- ret = _regmap_select_page(map, &reg, range, val_num);
+ ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
if (ret != 0)
return ret;
}
@@ -1749,7 +1749,8 @@
map->work_buf +
map->format.reg_bytes +
map->format.pad_bytes,
- map->format.val_bytes);
+ map->format.val_bytes,
+ false);
}
static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1843,7 +1844,7 @@
EXPORT_SYMBOL_GPL(regmap_write_async);
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+ const void *val, size_t val_len, bool noinc)
{
size_t val_bytes = map->format.val_bytes;
size_t val_count = val_len / val_bytes;
@@ -1864,7 +1865,7 @@
/* Write as many bytes as possible with chunk_size */
for (i = 0; i < chunk_count; i++) {
- ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+ ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
if (ret)
return ret;
@@ -1875,7 +1876,7 @@
/* Write remaining bytes */
if (val_len)
- ret = _regmap_raw_write_impl(map, reg, val, val_len);
+ ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
return ret;
}
@@ -1908,7 +1909,7 @@
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len);
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
map->unlock(map->lock_arg);
@@ -1966,7 +1967,7 @@
write_len = map->max_raw_write;
else
write_len = val_len;
- ret = _regmap_raw_write(map, reg, val, write_len);
+ ret = _regmap_raw_write(map, reg, val, write_len, true);
if (ret)
goto out_unlock;
val = ((u8 *)val) + write_len;
@@ -2443,7 +2444,7 @@
map->async = true;
- ret = _regmap_raw_write(map, reg, val, val_len);
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
map->async = false;
@@ -2454,7 +2455,7 @@
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
- unsigned int val_len)
+ unsigned int val_len, bool noinc)
{
struct regmap_range_node *range;
int ret;
@@ -2467,7 +2468,7 @@
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
- val_len / map->format.val_bytes);
+ noinc ? 1 : val_len / map->format.val_bytes);
if (ret != 0)
return ret;
}
@@ -2505,7 +2506,7 @@
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
if (ret == 0)
*val = map->format.parse_val(work_val);
@@ -2621,7 +2622,7 @@
/* Read bytes that fit into whole chunks */
for (i = 0; i < chunk_count; i++) {
- ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
if (ret != 0)
goto out;
@@ -2632,7 +2633,7 @@
/* Read remaining bytes */
if (val_len) {
- ret = _regmap_raw_read(map, reg, val, val_len);
+ ret = _regmap_raw_read(map, reg, val, val_len, false);
if (ret != 0)
goto out;
}
@@ -2707,7 +2708,7 @@
read_len = map->max_raw_read;
else
read_len = val_len;
- ret = _regmap_raw_read(map, reg, val, read_len);
+ ret = _regmap_raw_read(map, reg, val, read_len, true);
if (ret)
goto out_unlock;
val = ((u8 *)val) + read_len;
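The regmap_format_* and regmap_parse_* rewrites above replace casts of possibly unaligned buffers with put_unaligned_*/get_unaligned_* or memcpy(), which is safe on strict-alignment CPUs. A userspace approximation of the same idea (the helpers below are stand-ins, not the kernel's <asm/unaligned.h> API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for put_unaligned()/get_unaligned(): copying through memcpy()
 * is defined for any alignment, unlike dereferencing a cast u32 pointer. */
static void put_u32_native(void *buf, uint32_t val)
{
	memcpy(buf, &val, sizeof(val));
}

static uint32_t get_u32_native(const void *buf)
{
	uint32_t val;

	memcpy(&val, buf, sizeof(val));
	return val;
}

int main(void)
{
	unsigned char raw[8];
	void *unaligned = raw + 1;	/* deliberately misaligned destination */

	put_u32_native(unaligned, UINT32_C(0xdeadbeef));
	printf("0x%08" PRIx32 "\n", get_u32_native(unaligned));
	return 0;
}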
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index a1f3f09..4c3b981 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -520,7 +520,10 @@
{
struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
@@ -531,14 +534,18 @@
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
- (c && list_is_last(&c->entry, &p->children)))
+ (c && list_is_last(&c->entry, &p->children))) {
+ fwnode_handle_put(child);
return NULL;
+ }
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
- return &c->fwnode;
+
+ fwnode_handle_put(child);
+ return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *
@@ -676,6 +683,13 @@
{
struct swnode *swnode = kobj_to_swnode(kobj);
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
if (swnode->allocated) {
property_entries_free(swnode->node->properties);
kfree(swnode->node);
@@ -798,6 +812,9 @@
if (software_node_to_swnode(node))
return -EEXIST;
+ if (node->parent && !parent)
+ return -EINVAL;
+
return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
}
EXPORT_SYMBOL_GPL(software_node_register);
@@ -841,13 +858,6 @@
if (!swnode)
return;
- if (swnode->parent) {
- ida_simple_remove(&swnode->parent->child_ids, swnode->id);
- list_del(&swnode->entry);
- } else {
- ida_simple_remove(&swnode_root_ids, swnode->id);
- }
-
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index f4b1d8e..3bb7beb 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -44,7 +44,8 @@
* performing an async init on that node.
*/
if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
- if (dev_to_node(dev) != numa_node_id()) {
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ dev_to_node(dev) != numa_node_id()) {
dev_warn(dev, "NUMA node mismatch %d != %d\n",
dev_to_node(dev), numa_node_id());
atomic_inc(&warnings);
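The final hunk only performs the NUMA comparison when CONFIG_NUMA is enabled, since a non-NUMA build otherwise produces spurious mismatch warnings. A rough userspace analogue of that compile-time gating (CONFIG_NUMA_ENABLED, dev_node() and this_node() are stand-ins for IS_ENABLED(CONFIG_NUMA), dev_to_node() and numa_node_id()):

#include <stdio.h>

#define CONFIG_NUMA_ENABLED 0	/* stand-in for IS_ENABLED(CONFIG_NUMA) */

static int dev_node(void)  { return -1; }	/* node unknown without NUMA */
static int this_node(void) { return 0; }

int main(void)
{
	if (CONFIG_NUMA_ENABLED && dev_node() != this_node())
		fprintf(stderr, "NUMA node mismatch %d != %d\n",
			dev_node(), this_node());
	else
		printf("check skipped or nodes match\n");
	return 0;
}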