Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index e1bb691..ec5bb19 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
-obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4..ced6863 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
@@ -13,6 +12,7 @@
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/of_clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_domain.h>
@@ -65,10 +65,15 @@
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
- clk_prepare(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
+ if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev,
+ "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
+ }
}
}
@@ -88,8 +93,6 @@
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
if (!ce->con_id) {
- dev_err(dev,
- "Not enough memory for clock connection ID.\n");
kfree(ce);
return -ENOMEM;
}
@@ -191,8 +194,7 @@
if (!dev || !dev->of_node)
return -EINVAL;
- count = of_count_phandle_with_args(dev->of_node, "clocks",
- "#clock-cells");
+ count = of_clk_get_parent_count(dev->of_node);
if (count <= 0)
return -ENODEV;
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951..8db98a1 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -160,7 +158,7 @@
* For a detailed function description, see dev_pm_domain_attach_by_id().
*/
struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4b57141..cc85e87 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain.c - Common code related to device power domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
+#include <linux/cpu.h>
#include "power.h"
@@ -126,6 +127,8 @@
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -146,29 +149,24 @@
return ret;
}
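+
+/* Forward declaration, used by dev_to_genpd_safe() below. */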
+static int genpd_runtime_suspend(struct device *dev);
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
* and checks that the PM domain pointer is a real generic PM domain.
* Any failure results in NULL being returned.
*/
-static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
+static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
return NULL;
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (&gpd->domain == dev->pm_domain) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
+	/* A genpd always has its ->runtime_suspend() callback assigned. */
+ if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
+ return pd_to_genpd(dev->pm_domain);
- return genpd;
+ return NULL;
}
/*
@@ -239,6 +237,127 @@
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
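+
+/*
+ * Return the aggregated performance state of @genpd: the maximum of @state,
+ * the states requested by the devices in the domain and the states
+ * requested by its subdomains.
+ */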
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct generic_pm_domain_data *pd_data;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ return state;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ return state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ /*
+ * Traverse all sub-domains within the domain. This can be
+ * done without any additional locking as the link->performance_state
+ * field is protected by the master genpd->lock, which is already taken.
+ *
+ * Also note that link->performance_state (subdomain's performance state
+ * requirement to master domain) is different from
+ * link->slave->performance_state (current performance state requirement
+ * of the devices/sub-domains of the subdomain) and so can have a
+ * different value.
+ *
+	 * Note that we also take votes from powered-off sub-domains into
+	 * account, as is done for devices right now.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ if (link->performance_state > state)
+ state = link->performance_state;
+ }
+
+ return state;
+}
+
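+/*
+ * Propagate the performance-state requirement of @genpd to its masters
+ * (rolling back on failure), then apply @state via the domain's
+ * ->set_performance_state() callback.
+ */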
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *master;
+ struct gpd_link *link;
+ int master_state, ret;
+
+ if (state == genpd->performance_state)
+ return 0;
+
+ /* Propagate to masters of genpd */
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ /* Find master's performance state */
+ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ master->opp_table,
+ state);
+ if (unlikely(ret < 0))
+ goto err;
+
+ master_state = ret;
+
+ genpd_lock_nested(master, depth + 1);
+
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = master_state;
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
+
+ genpd_unlock(master);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+
+ genpd->performance_state = state;
+ return 0;
+
+err:
+ /* Encountered an error, lets rollback */
+ list_for_each_entry_continue_reverse(link, &genpd->slave_links,
+ slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ genpd_lock_nested(master, depth + 1);
+
+ master_state = link->prev_performance_state;
+ link->performance_state = master_state;
+
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ master->name, master_state);
+ }
+
+ genpd_unlock(master);
+ }
+
+ return ret;
+}
+
/**
* dev_pm_genpd_set_performance_state - Set performance state of device's power
* domain.
@@ -257,23 +376,20 @@
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data, *pd_data;
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data;
unsigned int prev;
- int ret = 0;
+ int ret;
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
return -ENODEV;
if (unlikely(!genpd->set_performance_state))
return -EINVAL;
- if (unlikely(!dev->power.subsys_data ||
- !dev->power.subsys_data->domain_data)) {
- WARN_ON(1);
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
return -EINVAL;
- }
genpd_lock(genpd);
@@ -281,47 +397,11 @@
prev = gpd_data->performance_state;
gpd_data->performance_state = state;
- /* New requested state is same as Max requested state */
- if (state == genpd->performance_state)
- goto unlock;
+ state = _genpd_reeval_performance_state(genpd, state);
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev;
- /* New requested state is higher than Max requested state */
- if (state > genpd->performance_state)
- goto update_state;
-
- /* Traverse all devices within the domain */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- pd_data = to_gpd_data(pdd);
-
- if (pd_data->performance_state > state)
- state = pd_data->performance_state;
- }
-
- if (state == genpd->performance_state)
- goto unlock;
-
- /*
- * We aren't propagating performance state changes of a subdomain to its
- * masters as we don't have hardware that needs it. Over that, the
- * performance states of subdomain and its masters may not have
- * one-to-one mapping and would require additional information. We can
- * get back to this once we have hardware that needs it. For that
- * reason, we don't have to consider performance state of the subdomains
- * of genpd here.
- */
-
-update_state:
- if (genpd_status_on(genpd)) {
- ret = genpd->set_performance_state(genpd, state);
- if (ret) {
- gpd_data->performance_state = prev;
- goto unlock;
- }
- }
-
- genpd->performance_state = state;
-
-unlock:
genpd_unlock(genpd);
return ret;
@@ -347,15 +427,6 @@
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-
- if (unlikely(genpd->set_performance_state)) {
- ret = genpd->set_performance_state(genpd, genpd->performance_state);
- if (ret) {
- pr_warn("%s: Failed to set performance state %d (%d)\n",
- genpd->name, genpd->performance_state, ret);
- }
- }
-
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -382,19 +453,19 @@
time_start = ktime_get();
ret = genpd->power_off(genpd);
- if (ret == -EBUSY)
+ if (ret)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
- return ret;
+ return 0;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
- return ret;
+ return 0;
}
/**
@@ -440,7 +511,9 @@
- * (1) The domain is configured as always on.
+ * (1) The domain is configured as always on or runtime-PM always on.
* (2) When the domain has a subdomain being powered on.
*/
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -467,6 +540,10 @@
return -EAGAIN;
}
+ /* Default to shallowest state. */
+ if (!genpd->gov)
+ genpd->state_idx = 0;
+
if (genpd->power_off) {
int ret;
@@ -1315,8 +1392,7 @@
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
- struct gpd_timing_data *td)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1331,9 +1407,6 @@
goto err_put;
}
- if (td)
- gpd_data->td = *td;
-
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1373,8 +1446,57 @@
dev_pm_put_subsys_data(dev);
}
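+
+/*
+ * Set or clear @cpu in the cpumask of @genpd and, recursively, in the
+ * cpumasks of all of its masters, so that every level of a CPU PM domain
+ * hierarchy tracks the CPUs it covers.
+ */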
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ struct generic_pm_domain *master = link->master;
+
+ genpd_lock_nested(master, depth + 1);
+ genpd_update_cpumask(master, cpu, set, depth + 1);
+ genpd_unlock(master);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
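+/* Return the CPU number if @dev is a CPU device of a CPU PM domain, else -1. */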
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
+ struct device *base_dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1384,16 +1506,19 @@
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = genpd_alloc_dev_data(dev, td);
+ gpd_data = genpd_alloc_dev_data(dev);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- genpd_lock(genpd);
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
+ genpd_lock(genpd);
+
+ genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
@@ -1401,13 +1526,13 @@
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- out:
genpd_unlock(genpd);
-
+ out:
if (ret)
genpd_free_dev_data(dev, gpd_data);
else
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
@@ -1422,7 +1547,7 @@
int ret;
mutex_lock(&gpd_list_lock);
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
return ret;
@@ -1440,7 +1565,8 @@
pdd = dev->power.subsys_data->domain_data;
gpd_data = to_gpd_data(pdd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
genpd_lock(genpd);
@@ -1452,22 +1578,23 @@
genpd->device_count--;
genpd->max_off_time_changed = true;
- if (genpd->detach_dev)
- genpd->detach_dev(genpd, dev);
-
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
genpd_unlock(genpd);
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
return ret;
}
@@ -1478,7 +1605,7 @@
*/
int pm_genpd_remove_device(struct device *dev)
{
- struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
+ struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -EINVAL;
@@ -1578,8 +1705,8 @@
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
- pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
- subdomain->name);
+ pr_warn("%s: unable to remove subdomain %s\n",
+ genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
@@ -1606,6 +1733,12 @@
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
struct genpd_power_state *state;
@@ -1616,7 +1749,7 @@
genpd->states = state;
genpd->state_count = 1;
- genpd->free = state;
+ genpd->free_states = genpd_free_default_power_state;
return 0;
}
@@ -1679,14 +1812,24 @@
}
/* Always-on domains must be powered on at initialization. */
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd))
return -EINVAL;
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/* Use only one "off" state if there were no states declared */
if (genpd->state_count == 0) {
ret = genpd_set_default_power_state(genpd);
- if (ret)
+ if (ret) {
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
return ret;
+ }
+ } else if (!gov && genpd->state_count > 1) {
+ pr_warn("%s: no governor for states\n", genpd->name);
}
device_initialize(&genpd->dev);
@@ -1730,7 +1873,11 @@
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
- kfree(genpd->free);
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -1901,12 +2048,21 @@
ret);
goto unlock;
}
+
+ /*
+ * Save table for faster processing while setting performance
+ * state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(!genpd->opp_table);
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
goto unlock;
}
@@ -1959,6 +2115,13 @@
i, ret);
goto error;
}
+
+ /*
+ * Save table for faster processing while setting
+ * performance state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+ WARN_ON(!genpd->opp_table);
}
genpd->provider = &np->fwnode;
@@ -1983,8 +2146,10 @@
genpd->provider = NULL;
genpd->has_provider = false;
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
}
mutex_unlock(&gpd_list_lock);
@@ -2018,6 +2183,7 @@
if (!gpd->set_performance_state)
continue;
+ dev_pm_opp_put_opp_table(gpd->opp_table);
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
@@ -2089,7 +2255,7 @@
goto out;
}
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
out:
mutex_unlock(&gpd_list_lock);
@@ -2173,6 +2339,7 @@
static void genpd_release_dev(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(dev);
}
@@ -2234,14 +2401,14 @@
genpd_queue_power_off_work(pd);
}
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int ret;
- ret = of_parse_phandle_with_args(np, "power-domains",
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", index, &pd_args);
if (ret < 0)
return ret;
@@ -2253,12 +2420,12 @@
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(dev);
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- ret = genpd_add_device(pd, dev, NULL);
+ ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
@@ -2309,7 +2476,7 @@
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2332,45 +2499,46 @@
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index)
{
- struct device *genpd_dev;
+ struct device *virt_dev;
int num_domains;
int ret;
if (!dev->of_node)
return NULL;
- /* Deal only with devices using multiple PM domains. */
+ /* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (num_domains < 2 || index >= num_domains)
+ if (index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
- genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
- if (!genpd_dev)
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (!virt_dev)
return ERR_PTR(-ENOMEM);
- dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
- genpd_dev->bus = &genpd_bus_type;
- genpd_dev->release = genpd_release_dev;
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+ virt_dev->bus = &genpd_bus_type;
+ virt_dev->release = genpd_release_dev;
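+	/* Take a reference on the of_node; genpd_release_dev() drops it. */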
+ virt_dev->of_node = of_node_get(dev->of_node);
- ret = device_register(genpd_dev);
+ ret = device_register(virt_dev);
if (ret) {
- kfree(genpd_dev);
+ put_device(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
if (ret < 1) {
- device_unregister(genpd_dev);
+ device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_enable(genpd_dev);
- genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
+ pm_runtime_enable(virt_dev);
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
- return genpd_dev;
+ return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
@@ -2383,7 +2551,7 @@
* power-domain-names DT property. For further description see
* genpd_dev_pm_attach_by_id().
*/
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
int index;
@@ -2414,7 +2582,7 @@
&entry_latency);
if (err) {
pr_debug(" * %pOF missing entry-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
@@ -2422,7 +2590,7 @@
&exit_latency);
if (err) {
pr_debug(" * %pOF missing exit-latency-us property\n",
- state_node);
+ state_node);
return -EINVAL;
}
@@ -2478,8 +2646,8 @@
*
* Returns the device states parsed from the OF node. The memory for the states
* is allocated by this function and is the responsibility of the caller to
- * free the memory after use. If no domain idle states is found it returns
- * -EINVAL and in case of errors, a negative error code.
+ * free the memory after use. Returns 0 whether or not any compatible domain
+ * idle states are found, and a negative error code in case of errors.
*/
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
@@ -2488,8 +2656,14 @@
int ret;
ret = genpd_iterate_idle_states(dn, NULL);
- if (ret <= 0)
- return ret < 0 ? ret : -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ *states = NULL;
+ *n = 0;
+ return 0;
+ }
st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
if (!st)
@@ -2509,52 +2683,36 @@
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
/**
- * of_genpd_opp_to_performance_state- Gets performance state of device's
- * power domain corresponding to a DT node's "required-opps" property.
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
*
- * @dev: Device for which the performance-state needs to be found.
- * @np: DT node where the "required-opps" property is present. This can be
- * the device node itself (if it doesn't have an OPP table) or a node
- * within the OPP table of a device (if device has an OPP table).
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ * state.
*
- * Returns performance state corresponding to the "required-opps" property of
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
- * callback to translate power domain OPP to performance state.
+ * Returns performance state encoded in the OPP of the genpd. This calls
+ * platform specific genpd->opp_to_performance_state() callback to translate
+ * power domain OPP to performance state.
*
* Returns performance state on success and 0 on failure.
*/
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
- struct generic_pm_domain *genpd;
- struct dev_pm_opp *opp;
- int state = 0;
+	struct generic_pm_domain *genpd;
+ int state;
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return 0;
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
- if (unlikely(!genpd->set_performance_state))
+ if (unlikely(!genpd->opp_to_performance_state))
return 0;
genpd_lock(genpd);
-
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
- if (IS_ERR(opp)) {
- dev_err(dev, "Failed to find required OPP: %ld\n",
- PTR_ERR(opp));
- goto unlock;
- }
-
state = genpd->opp_to_performance_state(genpd, opp);
- dev_pm_opp_put(opp);
-
-unlock:
genpd_unlock(genpd);
return state;
}
-EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
static int __init genpd_bus_init(void)
{
@@ -2659,7 +2817,7 @@
return 0;
}
-static int genpd_summary_show(struct seq_file *s, void *data)
+static int summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2682,7 +2840,7 @@
return ret;
}
-static int genpd_status_show(struct seq_file *s, void *data)
+static int status_show(struct seq_file *s, void *data)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2709,7 +2867,7 @@
return ret;
}
-static int genpd_sub_domains_show(struct seq_file *s, void *data)
+static int sub_domains_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct gpd_link *link;
@@ -2726,7 +2884,7 @@
return ret;
}
-static int genpd_idle_states_show(struct seq_file *s, void *data)
+static int idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
unsigned int i;
@@ -2755,7 +2913,7 @@
return ret;
}
-static int genpd_active_time_show(struct seq_file *s, void *data)
+static int active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0;
@@ -2775,7 +2933,7 @@
return ret;
}
-static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+static int total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0, total = 0;
@@ -2803,7 +2961,7 @@
}
-static int genpd_devices_show(struct seq_file *s, void *data)
+static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
@@ -2829,7 +2987,7 @@
return ret;
}
-static int genpd_perf_state_show(struct seq_file *s, void *data)
+static int perf_state_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
@@ -2842,37 +3000,14 @@
return 0;
}
-#define define_genpd_open_function(name) \
-static int genpd_##name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, genpd_##name##_show, inode->i_private); \
-}
-
-define_genpd_open_function(summary);
-define_genpd_open_function(status);
-define_genpd_open_function(sub_domains);
-define_genpd_open_function(idle_states);
-define_genpd_open_function(active_time);
-define_genpd_open_function(total_idle_time);
-define_genpd_open_function(devices);
-define_genpd_open_function(perf_state);
-
-#define define_genpd_debugfs_fops(name) \
-static const struct file_operations genpd_##name##_fops = { \
- .open = genpd_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-}
-
-define_genpd_debugfs_fops(summary);
-define_genpd_debugfs_fops(status);
-define_genpd_debugfs_fops(sub_domains);
-define_genpd_debugfs_fops(idle_states);
-define_genpd_debugfs_fops(active_time);
-define_genpd_debugfs_fops(total_idle_time);
-define_genpd_debugfs_fops(devices);
-define_genpd_debugfs_fops(perf_state);
+DEFINE_SHOW_ATTRIBUTE(summary);
+DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
+DEFINE_SHOW_ATTRIBUTE(idle_states);
+DEFINE_SHOW_ATTRIBUTE(active_time);
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
+DEFINE_SHOW_ATTRIBUTE(devices);
+DEFINE_SHOW_ATTRIBUTE(perf_state);
static int __init genpd_debug_init(void)
{
@@ -2881,34 +3016,27 @@
genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!genpd_debugfs_dir)
- return -ENOMEM;
-
- d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- genpd_debugfs_dir, NULL, &genpd_summary_fops);
- if (!d)
- return -ENOMEM;
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+ NULL, &summary_fops);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
- if (!d)
- return -ENOMEM;
debugfs_create_file("current_state", 0444,
- d, genpd, &genpd_status_fops);
+ d, genpd, &status_fops);
debugfs_create_file("sub_domains", 0444,
- d, genpd, &genpd_sub_domains_fops);
+ d, genpd, &sub_domains_fops);
debugfs_create_file("idle_states", 0444,
- d, genpd, &genpd_idle_states_fops);
+ d, genpd, &idle_states_fops);
debugfs_create_file("active_time", 0444,
- d, genpd, &genpd_active_time_fops);
+ d, genpd, &active_time_fops);
debugfs_create_file("total_idle_time", 0444,
- d, genpd, &genpd_total_idle_time_fops);
+ d, genpd, &total_idle_time_fops);
debugfs_create_file("devices", 0444,
- d, genpd, &genpd_devices_fops);
+ d, genpd, &devices_fops);
if (genpd->set_performance_state)
debugfs_create_file("perf_state", 0444,
- d, genpd, &genpd_perf_state_fops);
+ d, genpd, &perf_state_fops);
}
return 0;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 99896fb..daa8c76 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -1,15 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain_governor.c - Governors for device PM domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -32,7 +33,7 @@
* take its current PM QoS constraint (that's the only thing
* known at this point anyway).
*/
- constraint_ns = dev_pm_qos_read_value(dev);
+ constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
constraint_ns *= NSEC_PER_USEC;
}
@@ -65,7 +66,7 @@
td->constraint_changed = false;
td->cached_suspend_ok = false;
td->effective_constraint_ns = 0;
- constraint_ns = __dev_pm_qos_read_value(dev);
+ constraint_ns = __dev_pm_qos_resume_latency(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -128,7 +129,6 @@
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;
-
min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
@@ -211,8 +211,10 @@
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
- if (!genpd->max_off_time_changed)
+ if (!genpd->max_off_time_changed) {
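+		/* Restore the state index the cached result was computed with. */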
+ genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
+ }
/*
* We have to invalidate the cached results for the masters, so
@@ -237,6 +239,7 @@
genpd->state_idx--;
}
+ genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
@@ -245,6 +248,65 @@
return false;
}
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!default_power_down_ok(pd))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+ * and its subdomains. Note, we only need the genpd->cpus, as it already
+ * contains a mask of all CPUs from subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+	/* The minimum idle duration is from now until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+	 * Find the deepest idle state whose residency value is satisfied,
+	 * also taking the state's power-off latency into account.
+ * Start at the state picked by the dev PM QoS constraint validation.
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif /* CONFIG_CPU_IDLE */
+
struct dev_power_governor simple_qos_governor = {
.suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index b2ed606..4fa5256 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd4..134a8af 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
- * This file is released under the GPLv2
- *
- *
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
@@ -17,6 +15,8 @@
* subsystem list maintains.
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -32,6 +32,7 @@
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
+#include <linux/devfreq.h>
#include <linux/timer.h>
#include "../base.h"
@@ -123,7 +124,11 @@
*/
void device_pm_add(struct device *dev)
{
- pr_debug("PM: Adding info for %s:%s\n",
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
@@ -141,7 +146,10 @@
*/
void device_pm_remove(struct device *dev)
{
- pr_debug("PM: Removing info for %s:%s\n",
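+	/* Nothing to do if PM setup was skipped in device_pm_add(). */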
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
@@ -160,7 +168,7 @@
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
+ pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
@@ -174,7 +182,7 @@
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
+ pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
@@ -187,7 +195,7 @@
*/
void device_pm_move_last(struct device *dev)
{
- pr_debug("PM: Moving %s:%s to end of list\n",
+ pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -197,7 +205,7 @@
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -215,7 +223,7 @@
rettime = ktime_get();
nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
- dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
(unsigned long long)nsecs >> 10);
}
@@ -410,8 +418,8 @@
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ pr_err("Device %s failed to %s%s: error %d\n",
+ dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -468,7 +476,7 @@
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that the PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -522,21 +530,6 @@
/*------------------------- Resume routines -------------------------*/
/**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
- * @dev: Target device.
- *
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
- */
-void dev_pm_skip_next_resume_phases(struct device *dev)
-{
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
-}
-
-/**
* suspend_event - Return a "suspend" message for given "resume" one.
* @resume_msg: PM message representing a system-wide resume transition.
*/
@@ -673,6 +666,9 @@
dev->power.is_noirq_suspended = false;
if (skip_resume) {
+ /* Make the next phases of resume skip the device. */
+ dev->power.is_late_suspended = false;
+ dev->power.is_suspended = false;
/*
* The device is going to be left in suspend, but it might not
* have been in runtime suspend before the system suspended, so
@@ -681,7 +677,6 @@
* device again.
*/
pm_runtime_set_suspended(dev);
- dev_pm_skip_next_resume_phases(dev);
}
Out:
@@ -696,6 +691,19 @@
&& !pm_trace_is_enabled();
}
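+/*
+ * Schedule @func for asynchronous execution on behalf of @dev, if @dev is
+ * enabled for asynchronous suspend/resume. Return true if it was scheduled,
+ * false if the caller must handle @dev synchronously.
+ */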
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(func, dev);
+ return true;
+ }
+
+ return false;
+}
+
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
@@ -708,7 +716,7 @@
put_device(dev);
}
-void dpm_noirq_resume_devices(pm_message_t state)
+static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -722,13 +730,8 @@
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_noirq, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
@@ -757,13 +760,6 @@
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
-void dpm_noirq_end(void)
-{
- resume_device_irqs();
- device_wakeup_disarm_wake_irqs();
- cpuidle_resume();
-}
-
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -774,7 +770,11 @@
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
- dpm_noirq_end();
+
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
+
+ cpuidle_resume();
}
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
@@ -879,13 +879,8 @@
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_early, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
@@ -1043,13 +1038,8 @@
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
@@ -1078,6 +1068,7 @@
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
+ devfreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
@@ -1297,11 +1288,6 @@
if (async_error)
goto Complete;
- if (pm_wakeup_pending()) {
- async_error = -EBUSY;
- goto Complete;
- }
-
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@@ -1362,24 +1348,13 @@
static int device_suspend_noirq(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_noirq, dev);
+ if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
- }
+
return __device_suspend_noirq(dev, pm_transition, false);
}
-void dpm_noirq_begin(void)
-{
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
-}
-
-int dpm_noirq_suspend_devices(pm_message_t state)
+static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
@@ -1436,7 +1411,11 @@
{
int ret;
- dpm_noirq_begin();
+ cpuidle_pause();
+
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
@@ -1565,13 +1544,8 @@
static int device_suspend_late(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_late, dev);
+ if (dpm_async_fn(dev, async_suspend_late))
return 0;
- }
return __device_suspend_late(dev, pm_transition, false);
}
@@ -1633,17 +1607,20 @@
*/
int dpm_suspend_end(pm_message_t state)
{
- int error = dpm_suspend_late(state);
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
if (error)
- return error;
+ goto out;
error = dpm_suspend_noirq(state);
- if (error) {
+ if (error)
dpm_resume_early(resume_event(state));
- return error;
- }
- return 0;
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -1736,11 +1713,17 @@
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
goto Complete;
+ }
pm_runtime_enable(dev);
}
@@ -1829,13 +1812,8 @@
static int device_suspend(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend, dev);
+ if (dpm_async_fn(dev, async_suspend))
return 0;
- }
return __device_suspend(dev, pm_transition, false);
}
@@ -1852,6 +1830,7 @@
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
+ devfreq_suspend();
cpufreq_suspend();
mutex_lock(&dpm_list_mtx);
@@ -2010,8 +1989,7 @@
error = 0;
continue;
}
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
+ pr_info("Device %s not prepared for power transition: code %d\n",
dev_name(dev), error);
put_device(dev);
break;
@@ -2035,6 +2013,7 @@
*/
int dpm_suspend_start(pm_message_t state)
{
+ ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
@@ -2043,6 +2022,7 @@
dpm_save_failed_step(SUSPEND_PREPARE);
} else
error = dpm_suspend(state);
+ dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
@@ -2050,14 +2030,14 @@
void __suspend_report_result(const char *function, void *fn, int ret)
{
if (ret)
- printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+ pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c511def..39a06a0 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,6 +21,7 @@
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
@@ -148,3 +149,21 @@
device_pm_sleep_init(dev);
pm_runtime_init(dev);
}
+
+#ifdef CONFIG_PM_SLEEP
+
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3382542..350dcaf 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
@@ -22,7 +18,7 @@
* per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
@@ -94,29 +90,39 @@
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
- * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
* This routine must be called with dev->power.lock held.
*/
-s32 __dev_pm_qos_read_value(struct device *dev)
+s32 __dev_pm_qos_resume_latency(struct device *dev)
{
lockdep_assert_held(&dev->power.lock);
- return dev_pm_qos_raw_read_value(dev);
+ return dev_pm_qos_raw_resume_latency(dev);
}
/**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
+ * @type: QoS request type.
*/
-s32 dev_pm_qos_read_value(struct device *dev)
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
+ struct dev_pm_qos *qos = dev->power.qos;
unsigned long flags;
s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
- ret = __dev_pm_qos_read_value(dev);
+
+ if (type == DEV_PM_QOS_RESUME_LATENCY) {
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ : pm_qos_read_value(&qos->resume_latency);
+ } else {
+ WARN_ON(1);
+ ret = 0;
+ }
+
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -181,12 +187,11 @@
if (!qos)
return -ENOMEM;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
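+	/*
+	 * Allocate space for three blocking notifier heads, one for each
+	 * PM QoS request type that supports notifiers.
+	 */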
+ n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
}
- BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->resume_latency;
plist_head_init(&c->list);
@@ -195,6 +200,7 @@
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->latency_tolerance;
plist_head_init(&c->list);
@@ -256,11 +262,13 @@
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -471,6 +479,7 @@
*
* @dev: target device for the constraint
* @notifier: notifier block managed by caller.
+ * @type: request type.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
@@ -478,7 +487,8 @@
* If the device's constraints object doesn't exist when this routine is called,
* it will be created (or error code will be returned if that fails).
*/
-int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
int ret = 0;
@@ -489,10 +499,20 @@
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
- if (!ret)
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -504,24 +524,36 @@
*
* @dev: target device for the constraint
* @notifier: notifier block to be removed.
+ * @type: request type.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value.
*/
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
- int retval = 0;
+ int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
- notifier);
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ goto unlock;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
- return retval;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
@@ -581,6 +613,9 @@
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
+ default:
+ WARN_ON(1);
+ return;
}
__dev_pm_qos_remove_request(req);
kfree(req);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index beb85c3..48616f3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,13 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -62,22 +62,32 @@
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
{
- unsigned long now = jiffies;
- unsigned long delta;
-
- delta = now - dev->power.accounting_timestamp;
-
- dev->power.accounting_timestamp = now;
+ u64 now, last, delta;
if (dev->power.disable_depth > 0)
return;
+ last = dev->power.accounting_timestamp;
+
+ now = ktime_get_mono_fast_ns();
+ dev->power.accounting_timestamp = now;
+
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+	 * timestamp.
+ */
+ if (now < last)
+ return;
+
+ delta = now - last;
+
if (dev->power.runtime_status == RPM_SUSPENDED)
- dev->power.suspended_jiffies += delta;
+ dev->power.suspended_time += delta;
else
- dev->power.active_jiffies += delta;
+ dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -86,6 +96,32 @@
dev->power.runtime_status = status;
}
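+
+/*
+ * Return the time @dev has accumulated in the suspended (@suspended == true)
+ * or active runtime PM state, bringing the accounting up to date first.
+ */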
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
@@ -93,7 +129,7 @@
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- del_timer(&dev->power.suspend_timer);
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
@@ -119,43 +155,29 @@
* Compute the autosuspend-delay expiration time based on the device's
* power.last_busy time. If the delay has already expired or is disabled
* (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
*
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
*/
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- long elapsed;
- unsigned long last_busy;
- unsigned long expires = 0;
+ u64 expires;
if (!dev->power.use_autosuspend)
- goto out;
+ return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
- goto out;
+ return 0;
- last_busy = READ_ONCE(dev->power.last_busy);
- elapsed = jiffies - last_busy;
- if (elapsed < 0)
- goto out; /* jiffies has wrapped around. */
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
- /*
- * If the autosuspend_delay is >= 1 second, align the timer by rounding
- * up to the nearest second.
- */
- expires = last_busy + msecs_to_jiffies(autosuspend_delay);
- if (autosuspend_delay >= 1000)
- expires = round_jiffies(expires);
- expires += !expires;
- if (elapsed >= expires - last_busy)
- expires = 0; /* Already expired. */
-
- out:
- return expires;
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
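The driver-side half of this contract is unchanged by the hrtimer conversion; a minimal sketch, with a hypothetical foo driver and illustrative values:

	#include <linux/pm_runtime.h>

	static void foo_pm_setup(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 100);	/* 100 ms idle window */
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
	}

	static void foo_io_done(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);		/* refreshes power.last_busy */
		pm_runtime_put_autosuspend(dev);	/* may arm the suspend hrtimer */
	}
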
@@ -253,7 +275,7 @@
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) == 0)
+ else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -265,14 +287,12 @@
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
- continue;
-
- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
- link->rpm_active)
+ if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
+ READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -281,7 +301,7 @@
pm_runtime_put_noidle(link->supplier);
return retval;
}
- link->rpm_active = true;
+ refcount_inc(&link->rpm_active);
}
return 0;
}
@@ -290,12 +310,14 @@
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->rpm_active &&
- READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+ continue;
+
+ while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
- link->rpm_active = false;
- }
+ }
}
/**
@@ -515,7 +537,7 @@
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO)
&& dev->power.runtime_status != RPM_SUSPENDING) {
- unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+ u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
/* Pending requests need to be canceled. */
@@ -528,10 +550,20 @@
* expire; pm_suspend_timer_fn() will take care of the
* rest.
*/
- if (!(dev->power.timer_expires && time_before_eq(
- dev->power.timer_expires, expires))) {
+ if (!(dev->power.timer_expires &&
+ dev->power.timer_expires <= expires)) {
+ /*
+ * Add a slack of 25% so that wakeups can be
+ * batched without sacrificing granularity.
+ */
+ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+ (NSEC_PER_MSEC >> 2);
+
dev->power.timer_expires = expires;
- mod_timer(&dev->power.suspend_timer, expires);
+ hrtimer_start_range_ns(&dev->power.suspend_timer,
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
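Worked example of the slack computation: with autosuspend_delay = 100 ms, slack = 100 * (NSEC_PER_MSEC >> 2) = 100 * 250000 ns = 25 ms, so the hrtimer may fire anywhere in [expires, expires + 25 ms], i.e. within 25% of the delay.
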
@@ -895,23 +927,28 @@
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
- struct device *dev = from_timer(dev, t, power.suspend_timer);
+ struct device *dev = container_of(timer, struct device, power.suspend_timer);
unsigned long flags;
- unsigned long expires;
+ u64 expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
- /* If 'expire' is after 'jiffies' we've been called too early. */
- if (expires > 0 && !time_after(expires, jiffies)) {
+ /*
+ * If 'expires' is after the current time, we've been called
+ * too early.
+ */
+ if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return HRTIMER_NORESTART;
}
/**
@@ -922,6 +959,7 @@
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
+ u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@@ -938,10 +976,10 @@
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
- dev->power.timer_expires += !dev->power.timer_expires;
+ expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+ dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
- mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+ hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
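A caller-side sketch (device and delay are illustrative):

	int ret = pm_schedule_suspend(dev, 500);	/* try to suspend in ~500 ms */

	if (ret < 0)
		dev_warn(dev, "failed to schedule suspend: %d\n", ret);
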
@@ -1084,24 +1122,57 @@
* and the device parent's counter of unsuspended children is modified to
* reflect the new status. If the new status is RPM_SUSPENDED, an idle
* notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront, and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
*/
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
- unsigned long flags;
bool notify_parent = false;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
return -EINVAL;
- spin_lock_irqsave(&dev->power.lock, flags);
+ spin_lock_irq(&dev->power.lock);
- if (!dev->power.runtime_error && !dev->power.disable_depth) {
+ /*
+ * Prevent PM-runtime from being enabled for the device or return an
+ * error if it is enabled already and working.
+ */
+ if (dev->power.runtime_error || dev->power.disable_depth)
+ dev->power.disable_depth++;
+ else
error = -EAGAIN;
- goto out;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (error)
+ return error;
+
+ /*
+ * If the new status is RPM_ACTIVE, the suppliers can be activated
+ * upfront regardless of the current status, because next time
+ * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+ * involved will be dropped down to one anyway.
+ */
+ if (status == RPM_ACTIVE) {
+ int idx = device_links_read_lock();
+
+ error = rpm_get_suppliers(dev);
+ if (error)
+ status = RPM_SUSPENDED;
+
+ device_links_read_unlock(idx);
}
+ spin_lock_irq(&dev->power.lock);
+
if (dev->power.runtime_status == status || !parent)
goto out_set;
@@ -1129,19 +1200,33 @@
spin_unlock(&parent->power.lock);
- if (error)
+ if (error) {
+ status = RPM_SUSPENDED;
goto out;
+ }
}
out_set:
__update_runtime_status(dev, status);
- dev->power.runtime_error = 0;
+ if (!error)
+ dev->power.runtime_error = 0;
+
out:
- spin_unlock_irqrestore(&dev->power.lock, flags);
+ spin_unlock_irq(&dev->power.lock);
if (notify_parent)
pm_request_idle(parent);
+ if (status == RPM_SUSPENDED) {
+ int idx = device_links_read_lock();
+
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
+ pm_runtime_enable(dev);
+
return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
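Callers normally reach this through the pm_runtime_set_active() and pm_runtime_set_suspended() inline wrappers; note that the function now bumps disable_depth on entry and rebalances it with the pm_runtime_enable() call on exit. A probe-time sketch for hardware that powers up active:

	/* Declare the device active before letting runtime PM manage it. */
	pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */
	pm_runtime_enable(dev);
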
@@ -1269,6 +1354,9 @@
pm_runtime_put_noidle(dev);
}
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
if (!dev->power.disable_depth++)
__pm_runtime_barrier(dev);
@@ -1287,10 +1375,15 @@
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0)
+ if (dev->power.disable_depth > 0) {
dev->power.disable_depth--;
- else
+
+ /* About to enable runtime PM; set accounting_timestamp to now. */
+ if (!dev->power.disable_depth)
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+ } else {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ }
WARN(!dev->power.disable_depth &&
dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1487,11 +1580,11 @@
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.accounting_timestamp = jiffies;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+ hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dev->power.suspend_timer.function = pm_suspend_timer_fn;
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1531,9 +1624,9 @@
*
* Check links from this device to any consumers and if any of them have active
* runtime PM references to the device, drop the usage counter of the device
- * (once per link).
+ * (as many times as needed).
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links with the DL_FLAG_MANAGED flag unset are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
@@ -1549,14 +1642,13 @@
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
+ device_links_read_lock_held()) {
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->rpm_active) {
+ while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put_noidle(dev);
- link->rpm_active = false;
- }
}
device_links_read_unlock(idx);
@@ -1573,9 +1665,13 @@
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->flags & DL_FLAG_PM_RUNTIME) {
+ link->supplier_preactivated = true;
+ refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1591,9 +1687,13 @@
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_put(link->supplier);
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
+ if (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1607,8 +1707,6 @@
void pm_runtime_drop_link(struct device *dev)
{
- rpm_put_suppliers(dev);
-
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738..d7d82db 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,12 +1,11 @@
-/*
- * drivers/base/power/sysfs.c - sysfs entries for device PM
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -125,10 +124,9 @@
struct device_attribute *attr, char *buf)
{
int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
- spin_unlock_irq(&dev->power.lock);
+ u64 tmp = pm_runtime_active_time(dev);
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
return ret;
}
@@ -138,11 +136,9 @@
struct device_attribute *attr, char *buf)
{
int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n",
- jiffies_to_msecs(dev->power.suspended_jiffies));
- spin_unlock_irq(&dev->power.lock);
+ u64 tmp = pm_runtime_suspended_time(dev);
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
return ret;
}
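do_div() divides a 64-bit value in place by a 32-bit divisor and returns the remainder, which is how the nanosecond counters become milliseconds above; an illustration with arbitrary values:

	u64 ns = 1234567890ULL;
	u32 rem = do_div(ns, NSEC_PER_MSEC);	/* ns is now 1234 (ms) */

	/* rem == 567890, the sub-millisecond part that sysfs drops. */
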
@@ -648,6 +644,10 @@
{
int rc;
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
@@ -668,8 +668,13 @@
if (rc)
goto err_wakeup;
}
+ rc = pm_wakeup_source_sysfs_add(dev);
+ if (rc)
+ goto err_latency;
return 0;
+ err_latency:
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
@@ -727,6 +732,8 @@
void dpm_sysfs_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 1cda505..977d27b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/trace.c
*
@@ -6,6 +7,7 @@
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
+#define pr_fmt(fmt) "PM: " fmt
#include <linux/pm-trace.h>
#include <linux/export.h>
@@ -118,9 +120,7 @@
unsigned int val;
mc146818_get_time(&time);
- pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
- time.tm_hour, time.tm_min, time.tm_sec,
- time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
+ pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
val -= 100;
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b8fa5c0..5ce77d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -1,16 +1,5 @@
-/*
- * wakeirq.c - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898..5817b51 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
#include <linux/slab.h>
@@ -72,22 +72,7 @@
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
-/**
- * wakeup_source_prepare - Prepare a new wakeup source for initialization.
- * @ws: Wakeup source to prepare.
- * @name: Pointer to the name of the new wakeup source.
- *
- * Callers must ensure that the @name string won't be freed when @ws is still in
- * use.
- */
-void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
-{
- if (ws) {
- memset(ws, 0, sizeof(*ws));
- ws->name = name;
- }
-}
-EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+static DEFINE_IDA(wakeup_ida);
/**
* wakeup_source_create - Create a struct wakeup_source object.
@@ -96,33 +81,34 @@
struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
+ const char *ws_name;
+ int id;
- ws = kmalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
- return NULL;
+ goto err_ws;
- wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
+ ws_name = kstrdup_const(name, GFP_KERNEL);
+ if (!ws_name)
+ goto err_name;
+ ws->name = ws_name;
+
+ id = ida_alloc(&wakeup_ida, GFP_KERNEL);
+ if (id < 0)
+ goto err_id;
+ ws->id = id;
+
return ws;
+
+err_id:
+ kfree_const(ws->name);
+err_name:
+ kfree(ws);
+err_ws:
+ return NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
-/**
- * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
- * @ws: Wakeup source to prepare for destruction.
- *
- * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
- * be run in parallel with this function for the same wakeup source object.
- */
-void wakeup_source_drop(struct wakeup_source *ws)
-{
- if (!ws)
- return;
-
- del_timer_sync(&ws->timer);
- __pm_relax(ws);
-}
-EXPORT_SYMBOL_GPL(wakeup_source_drop);
-
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
*/
@@ -151,6 +137,13 @@
spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
+static void wakeup_source_free(struct wakeup_source *ws)
+{
+ ida_free(&wakeup_ida, ws->id);
+ kfree_const(ws->name);
+ kfree(ws);
+}
+
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
@@ -162,10 +155,9 @@
if (!ws)
return;
- wakeup_source_drop(ws);
+ __pm_relax(ws);
wakeup_source_record(ws);
- kfree_const(ws->name);
- kfree(ws);
+ wakeup_source_free(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
@@ -205,21 +197,38 @@
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
+ * @dev: Device this wakeup source is associated with (or NULL if virtual).
* @name: Name of the wakeup source to register.
*/
-struct wakeup_source *wakeup_source_register(const char *name)
+struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
{
struct wakeup_source *ws;
+ int ret;
ws = wakeup_source_create(name);
- if (ws)
+ if (ws) {
+ if (!dev || device_is_registered(dev)) {
+ ret = wakeup_source_sysfs_add(dev, ws);
+ if (ret) {
+ wakeup_source_free(ws);
+ return NULL;
+ }
+ }
wakeup_source_add(ws);
-
+ }
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
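A lifecycle sketch for the two-argument API (name and error handling are illustrative):

	#include <linux/pm_wakeup.h>

	static int foo_handle_event(struct device *dev)
	{
		struct wakeup_source *ws;

		ws = wakeup_source_register(dev, "foo_events");
		if (!ws)
			return -ENOMEM;

		__pm_stay_awake(ws);	/* block system suspend while working */
		/* ... process the wakeup event ... */
		__pm_relax(ws);

		wakeup_source_unregister(ws);	/* also removes the sysfs entry */
		return 0;
	}
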
@@ -232,6 +241,7 @@
{
if (ws) {
wakeup_source_remove(ws);
+ wakeup_source_sysfs_remove(ws);
wakeup_source_destroy(ws);
}
}
@@ -275,7 +285,7 @@
if (pm_suspend_target_state != PM_SUSPEND_ON)
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
- ws = wakeup_source_register(dev_name(dev));
+ ws = wakeup_source_register(dev, dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -783,7 +793,7 @@
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
@@ -812,7 +822,7 @@
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_debug("active wakeup source: %s\n", ws->name);
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -823,7 +833,7 @@
}
if (!active && last_activity_ws)
- pr_debug("last active wakeup source: %s\n",
+ pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -853,7 +863,7 @@
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("PM: Wakeup pending, aborting suspend\n");
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
@@ -869,7 +879,7 @@
void pm_system_cancel_wakeup(void)
{
- atomic_dec(&pm_abort_suspend);
+ atomic_dec_if_positive(&pm_abort_suspend);
}
void pm_wakeup_clear(bool reset)
@@ -978,8 +988,6 @@
}
#endif /* CONFIG_PM_AUTOSLEEP */
-static struct dentry *wakeup_sources_stats_dentry;
-
/**
* print_wakeup_source_stats - Print wakeup source statistics information.
* @m: seq_file to print the statistics into.
@@ -1109,8 +1117,8 @@
static int __init wakeup_sources_debugfs_init(void)
{
- wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
- S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+ debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
+ &wakeup_sources_stats_fops);
return 0;
}
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
new file mode 100644
index 0000000..c773491
--- /dev/null
+++ b/drivers/base/power/wakeup_stats.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wakeup statistics in sysfs
+ *
+ * Copyright (c) 2019 Linux Foundation
+ * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2019 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "power.h"
+
+static struct class *wakeup_class;
+
+#define wakeup_attr(_name) \
+static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wakeup_source *ws = dev_get_drvdata(dev); \
+ \
+ return sprintf(buf, "%lu\n", ws->_name); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+wakeup_attr(active_count);
+wakeup_attr(event_count);
+wakeup_attr(wakeup_count);
+wakeup_attr(expire_count);
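+
+/*
+ * For reference, wakeup_attr(active_count) expands to roughly:
+ *
+ *	static ssize_t active_count_show(struct device *dev,
+ *			struct device_attribute *attr, char *buf)
+ *	{
+ *		struct wakeup_source *ws = dev_get_drvdata(dev);
+ *
+ *		return sprintf(buf, "%lu\n", ws->active_count);
+ *	}
+ *	static DEVICE_ATTR_RO(active_count);
+ */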
+
+static ssize_t active_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time =
+ ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
+
+ return sprintf(buf, "%lld\n", ktime_to_ms(active_time));
+}
+static DEVICE_ATTR_RO(active_time_ms);
+
+static ssize_t total_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t total_time = ws->total_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(total_time));
+}
+static DEVICE_ATTR_RO(total_time_ms);
+
+static ssize_t max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t max_time = ws->max_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ if (active_time > max_time)
+ max_time = active_time;
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(max_time));
+}
+static DEVICE_ATTR_RO(max_time_ms);
+
+static ssize_t last_change_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld\n", ktime_to_ms(ws->last_time));
+}
+static DEVICE_ATTR_RO(last_change_ms);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ws->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t prevent_suspend_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t prevent_sleep_time = ws->prevent_sleep_time;
+
+ if (ws->active && ws->autosleep_enabled) {
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(ktime_get(), ws->start_prevent_time));
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+}
+static DEVICE_ATTR_RO(prevent_suspend_time_ms);
+
+static struct attribute *wakeup_source_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_active_count.attr,
+ &dev_attr_event_count.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_expire_count.attr,
+ &dev_attr_active_time_ms.attr,
+ &dev_attr_total_time_ms.attr,
+ &dev_attr_max_time_ms.attr,
+ &dev_attr_last_change_ms.attr,
+ &dev_attr_prevent_suspend_time_ms.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wakeup_source);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *wakeup_source_device_create(struct device *parent,
+ struct wakeup_source *ws)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = MKDEV(0, 0);
+ dev->class = wakeup_class;
+ dev->parent = parent;
+ dev->groups = wakeup_source_groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, ws);
+ device_set_pm_not_required(dev);
+
+ retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
+ * @parent: Device the wakeup source is associated with (or NULL if virtual).
+ * @ws: Wakeup source to be added in sysfs.
+ */
+int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
+{
+ struct device *dev;
+
+ dev = wakeup_source_device_create(parent, ws);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ ws->dev = dev;
+
+ return 0;
+}
+
+/**
+ * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
+ * for a device if they're missing.
+ * @parent: Device given wakeup source is associated with
+ */
+int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ if (!parent->power.wakeup || parent->power.wakeup->dev)
+ return 0;
+
+ return wakeup_source_sysfs_add(parent, parent->power.wakeup);
+}
+
+/**
+ * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
+ * @ws: Wakeup source to be removed from sysfs.
+ */
+void wakeup_source_sysfs_remove(struct wakeup_source *ws)
+{
+ device_unregister(ws->dev);
+}
+
+static int __init wakeup_sources_sysfs_init(void)
+{
+ wakeup_class = class_create(THIS_MODULE, "wakeup");
+
+ return PTR_ERR_OR_ZERO(wakeup_class);
+}
+postcore_initcall(wakeup_sources_sysfs_init);
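With this class registered, each wakeup source appears under /sys/class/wakeup/wakeup<id>/ carrying the read-only attributes declared above (name, active_count, event_count, wakeup_count, expire_count, active_time_ms, total_time_ms, max_time_ms, last_change_ms and prevent_suspend_time_ms).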