Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
index a7fbb93..35dfc7e 100644
--- a/drivers/opp/Kconfig
+++ b/drivers/opp/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config PM_OPP
bool
select SRCU
@@ -10,4 +11,4 @@
OPP layer organizes the data internally using device pointers
representing individual voltage domains and provides SOC
implementations a ready to use framework to manage OPPs.
- For more information, read <file:Documentation/power/opp.txt>
+ For more information, read <file:Documentation/power/opp.rst>
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index 6ce6aef..f65ed59 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 31ff03d..9ff0538 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic OPP Interface
*
@@ -5,10 +6,6 @@
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -48,9 +45,14 @@
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
struct opp_table *opp_table;
+ bool found;
list_for_each_entry(opp_table, &opp_tables, node) {
- if (_find_opp_dev(dev, opp_table)) {
+ mutex_lock(&opp_table->lock);
+ found = !!_find_opp_dev(dev, opp_table);
+ mutex_unlock(&opp_table->lock);
+
+ if (found) {
_get_opp_table_kref(opp_table);
return opp_table;
@@ -126,6 +128,24 @@
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp: opp for which the level value has to be returned
+ *
+ * Return: level read from device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+
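+/*
+ * Example (illustrative sketch only): a consumer holding an OPP reference,
+ * e.g. from an exact-frequency lookup, can read its level; "dev" and "freq"
+ * are assumed to be provided by the caller.
+ *
+ *	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ *	if (!IS_ERR(opp)) {
+ *		level = dev_pm_opp_get_level(opp);
+ *		dev_pm_opp_put(opp);
+ *	}
+ */
+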
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
@@ -191,12 +211,12 @@
if (IS_ERR(opp_table))
return 0;
- count = opp_table->regulator_count;
-
/* Regulator may not be required for the device */
- if (!count)
+ if (!opp_table->regulators)
goto put_opp_table;
+ count = opp_table->regulator_count;
+
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
goto put_opp_table;
@@ -313,7 +333,7 @@
count = PTR_ERR(opp_table);
dev_dbg(dev, "%s: OPP table not found (%d)\n",
__func__, count);
- return 0;
+ return count;
}
count = _get_opp_count(opp_table);
@@ -381,6 +401,54 @@
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
+/**
+ * dev_pm_opp_find_level_exact() - search for an exact level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for an exact match in the opp table and returns a pointer
+ * to the matching opp if found, else returns ERR_PTR in case of error which
+ * should be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->level == level) {
+ opp = temp_opp;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+
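+/*
+ * Example (illustrative sketch only): looking up an OPP by its "opp-level"
+ * value and reading back its frequency; "dev" and "level" are assumed to be
+ * provided by the caller.
+ *
+ *	opp = dev_pm_opp_find_level_exact(dev, level);
+ *	if (!IS_ERR(opp)) {
+ *		freq = dev_pm_opp_get_freq(opp);
+ *		dev_pm_opp_put(opp);
+ *	}
+ */
+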
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -503,6 +571,60 @@
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+/**
+ * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
+ * target voltage.
+ * @dev: Device for which we do this operation.
+ * @u_volt: Target voltage.
+ *
+ * Search for the OPP with the highest (ceil) frequency whose voltage is
+ * <= u_volt.
+ *
+ * Return: matching *opp, else returns ERR_PTR in case of error which should be
+ * handled using IS_ERR.
+ *
+ * Error return values can be:
+ * EINVAL: bad parameters
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ if (!dev || !u_volt) {
+ dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
+ u_volt);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available) {
+ if (temp_opp->supplies[0].u_volt > u_volt)
+ break;
+ opp = temp_opp;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp))
+ dev_pm_opp_get(opp);
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
+
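+/*
+ * Example (illustrative sketch only): picking the fastest OPP that fits a
+ * voltage budget; "dev" and "u_volt" are assumed to be provided by the caller.
+ *
+ *	opp = dev_pm_opp_find_freq_ceil_by_volt(dev, u_volt);
+ *	if (!IS_ERR(opp)) {
+ *		freq = dev_pm_opp_get_freq(opp);
+ *		dev_pm_opp_put(opp);
+ *	}
+ */
+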
static int _set_opp_voltage(struct device *dev, struct regulator *reg,
struct dev_pm_opp_supply *supply)
{
@@ -528,9 +650,8 @@
return ret;
}
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long freq)
{
int ret;
@@ -543,44 +664,6 @@
return ret;
}
-static inline int
-_generic_set_opp_domain(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq,
- unsigned int old_pstate, unsigned int new_pstate)
-{
- int ret;
-
- /* Scaling up? Scale domain performance state before frequency */
- if (freq > old_freq) {
- ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
- if (ret)
- return ret;
- }
-
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
- if (ret)
- goto restore_domain_state;
-
- /* Scaling down? Scale domain performance state after frequency */
- if (freq < old_freq) {
- ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
- if (ret)
- goto restore_freq;
- }
-
- return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_domain_state:
- if (freq > old_freq)
- dev_pm_genpd_set_performance_state(dev, old_pstate);
-
- return ret;
-}
-
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
@@ -605,7 +688,7 @@
}
/* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
if (ret)
goto restore_voltage;
@@ -619,7 +702,7 @@
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
__func__, old_freq);
restore_voltage:
@@ -630,27 +713,102 @@
return ret;
}
+static int _set_opp_custom(const struct opp_table *opp_table,
+ struct device *dev, unsigned long old_freq,
+ unsigned long freq,
+ struct dev_pm_opp_supply *old_supply,
+ struct dev_pm_opp_supply *new_supply)
+{
+ struct dev_pm_set_opp_data *data;
+ int size;
+
+ data = opp_table->set_opp_data;
+ data->regulators = opp_table->regulators;
+ data->regulator_count = opp_table->regulator_count;
+ data->clk = opp_table->clk;
+ data->dev = dev;
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*old_supply) * opp_table->regulator_count;
+ if (!old_supply)
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_supply, size);
+
+ data->new_opp.rate = freq;
+ memcpy(data->new_opp.supplies, new_supply, size);
+
+ return opp_table->set_opp(data);
+}
+
+/* This is only called for PM domains for now */
+static int _set_required_opps(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+ struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
+ unsigned int pstate;
+ int i, ret = 0;
+
+ if (!required_opp_tables)
+ return 0;
+
+ /* Single genpd case */
+ if (!genpd_virt_devs) {
+ pstate = likely(opp) ? opp->required_opps[0]->pstate : 0;
+ ret = dev_pm_genpd_set_performance_state(dev, pstate);
+ if (ret) {
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
+ dev_name(dev), pstate, ret);
+ }
+ return ret;
+ }
+
+ /* Multiple genpd case */
+
+ /*
+ * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
+ * after it is freed from another thread.
+ */
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
+
+ if (!genpd_virt_devs[i])
+ continue;
+
+ ret = dev_pm_genpd_set_performance_state(genpd_virt_devs[i], pstate);
+ if (ret) {
+			dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
+ dev_name(genpd_virt_devs[i]), pstate, ret);
+ break;
+ }
+ }
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
* @target_freq: frequency to achieve
*
- * This configures the power-supplies and clock source to the levels specified
- * by the OPP corresponding to the target_freq.
+ * This configures the power-supplies to the levels specified by the OPP
+ * corresponding to the target_freq, and programs the clock to a value <=
+ * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
+ * fmax provided by the OPP should have already rounded target_freq to that
+ * OPP's frequency.
*/
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
- unsigned long freq, old_freq;
+ unsigned long freq, old_freq, temp_freq;
struct dev_pm_opp *old_opp, *opp;
struct clk *clk;
- int ret, size;
-
- if (unlikely(!target_freq)) {
- dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
- target_freq);
- return -EINVAL;
- }
+ int ret;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
@@ -658,6 +816,17 @@
return PTR_ERR(opp_table);
}
+ if (unlikely(!target_freq)) {
+ if (opp_table->required_opp_tables) {
+ ret = _set_required_opps(dev, opp_table, NULL);
+ } else {
+ dev_err(dev, "target frequency can't be 0\n");
+ ret = -EINVAL;
+ }
+
+ goto put_opp_table;
+ }
+
clk = opp_table->clk;
if (IS_ERR(clk)) {
dev_err(dev, "%s: No clock available for the device\n",
@@ -680,13 +849,15 @@
goto put_opp_table;
}
- old_opp = _find_freq_ceil(opp_table, &old_freq);
+ temp_freq = old_freq;
+ old_opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(old_opp)) {
dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
__func__, old_freq, PTR_ERR(old_opp));
}
- opp = _find_freq_ceil(opp_table, &freq);
+ temp_freq = freq;
+ opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
@@ -697,44 +868,34 @@
dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
old_freq, freq);
- /* Only frequency scaling */
- if (!opp_table->regulators) {
- /*
- * We don't support devices with both regulator and
- * domain performance-state for now.
- */
- if (opp_table->genpd_performance_state)
- ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
- IS_ERR(old_opp) ? 0 : old_opp->pstate,
- opp->pstate);
- else
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
- } else if (!opp_table->set_opp) {
+ /* Scaling up? Configure required OPPs before frequency */
+ if (freq >= old_freq) {
+ ret = _set_required_opps(dev, opp_table, opp);
+ if (ret)
+ goto put_opp;
+ }
+
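+	/*
+	 * Required OPPs (genpd performance states) are configured before the
+	 * clock when scaling up and after it when scaling down, so the domain
+	 * constraints of the currently programmed frequency always hold.
+	 */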
+ if (opp_table->set_opp) {
+ ret = _set_opp_custom(opp_table, dev, old_freq, freq,
+ IS_ERR(old_opp) ? NULL : old_opp->supplies,
+ opp->supplies);
+ } else if (opp_table->regulators) {
ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
IS_ERR(old_opp) ? NULL : old_opp->supplies,
opp->supplies);
} else {
- struct dev_pm_set_opp_data *data;
-
- data = opp_table->set_opp_data;
- data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
- data->clk = clk;
- data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*opp->supplies) * opp_table->regulator_count;
- if (IS_ERR(old_opp))
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
-
- data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, opp->supplies, size);
-
- ret = opp_table->set_opp(data);
+ /* Only frequency scaling */
+ ret = _generic_set_opp_clk_only(dev, clk, freq);
}
+ /* Scaling down? Configure required OPPs after frequency */
+ if (!ret && freq < old_freq) {
+ ret = _set_required_opps(dev, opp_table, opp);
+ if (ret)
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
+ }
+
+put_opp:
dev_pm_opp_put(opp);
put_old_opp:
if (!IS_ERR(old_opp))
@@ -754,11 +915,10 @@
kfree(opp_dev);
}
-struct opp_device *_add_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
+static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
+ struct opp_table *opp_table)
{
struct opp_device *opp_dev;
- int ret;
opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
if (!opp_dev)
@@ -766,18 +926,28 @@
/* Initialize opp-dev */
opp_dev->dev = dev;
+
list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
- ret = opp_debug_register(opp_dev, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
- __func__, ret);
+ opp_debug_register(opp_dev, opp_table);
return opp_dev;
}
-static struct opp_table *_allocate_opp_table(struct device *dev)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
+{
+ struct opp_device *opp_dev;
+
+ mutex_lock(&opp_table->lock);
+ opp_dev = _add_opp_dev_unlocked(dev, opp_table);
+ mutex_unlock(&opp_table->lock);
+
+ return opp_dev;
+}
+
+static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
struct opp_device *opp_dev;
@@ -791,15 +961,20 @@
if (!opp_table)
return NULL;
+ mutex_init(&opp_table->lock);
+ mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
+ /* Mark regulator count uninitialized */
+ opp_table->regulator_count = -1;
+
opp_dev = _add_opp_dev(dev, opp_table);
if (!opp_dev) {
kfree(opp_table);
return NULL;
}
- _of_init_opp_table(opp_table, dev);
+ _of_init_opp_table(opp_table, dev, index);
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
@@ -812,8 +987,8 @@
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
- mutex_init(&opp_table->lock);
kref_init(&opp_table->kref);
+ kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
@@ -825,7 +1000,7 @@
kref_get(&opp_table->kref);
}
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
@@ -836,32 +1011,60 @@
if (!IS_ERR(opp_table))
goto unlock;
- opp_table = _allocate_opp_table(dev);
+ opp_table = _managed_opp(dev, index);
+ if (opp_table) {
+ if (!_add_opp_dev_unlocked(dev, opp_table)) {
+ dev_pm_opp_put_opp_table(opp_table);
+ opp_table = NULL;
+ }
+ goto unlock;
+ }
+
+ opp_table = _allocate_opp_table(dev, index);
unlock:
mutex_unlock(&opp_table_lock);
return opp_table;
}
+
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+{
+ return _opp_get_opp_table(dev, 0);
+}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
+struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
+ int index)
+{
+ return _opp_get_opp_table(dev, index);
+}
+
static void _opp_table_kref_release(struct kref *kref)
{
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
- struct opp_device *opp_dev;
+ struct opp_device *opp_dev, *temp;
+
+ _of_clear_opp_table(opp_table);
/* Release clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
- opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
- node);
+ WARN_ON(!list_empty(&opp_table->opp_list));
- _remove_opp_dev(opp_dev, opp_table);
+ list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
+ /*
+ * The OPP table is getting removed, drop the performance state
+ * constraints.
+ */
+ if (opp_table->genpd_performance_state)
+ dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
- /* dev_list must be empty now */
- WARN_ON(!list_empty(&opp_table->dev_list));
+ _remove_opp_dev(opp_dev, opp_table);
+ }
+ mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
list_del(&opp_table->node);
kfree(opp_table);
@@ -869,6 +1072,33 @@
mutex_unlock(&opp_table_lock);
}
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put(opp);
+ }
+
+ opp_table->parsed_static_opps = false;
+}
+
+static void _opp_table_list_kref_release(struct kref *kref)
+{
+ struct opp_table *opp_table = container_of(kref, struct opp_table,
+ list_kref);
+
+ _opp_remove_all_static(opp_table);
+ mutex_unlock(&opp_table_lock);
+}
+
+void _put_opp_list_kref(struct opp_table *opp_table)
+{
+ kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
+ &opp_table_lock);
+}
+
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -881,22 +1111,35 @@
kfree(opp);
}
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
+ _of_opp_free_required_opps(opp_table, opp);
opp_debug_remove_one(opp);
list_del(&opp->node);
kfree(opp);
+}
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ _opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ _opp_kref_release(opp, opp_table);
mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
}
void dev_pm_opp_get(struct dev_pm_opp *opp)
@@ -906,10 +1149,16 @@
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
- kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+ kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+ &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+ kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
@@ -940,22 +1189,60 @@
if (found) {
dev_pm_opp_put(opp);
+
+ /* Drop the reference taken by dev_pm_opp_add() */
+ dev_pm_opp_put_opp_table(opp_table);
} else {
dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
__func__, freq);
}
+ /* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev: device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp, *temp;
+ int count = 0;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return;
+
+ mutex_lock(&opp_table->lock);
+ list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+ if (opp->dynamic) {
+ dev_pm_opp_put_unlocked(opp);
+ count++;
+ }
+ }
+ mutex_unlock(&opp_table->lock);
+
+ /* Drop the references taken by dev_pm_opp_add() */
+ while (count--)
+ dev_pm_opp_put_opp_table(opp_table);
+
+ /* Drop the reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
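+/*
+ * Example (illustrative sketch only): dynamic OPPs added at probe time with
+ * dev_pm_opp_add() can all be dropped in a single call on the remove path;
+ * the frequency/voltage values below are made up.
+ *
+ *	dev_pm_opp_add(dev, 1000000000, 975000);
+ *	dev_pm_opp_add(dev, 1200000000, 1075000);
+ *	...
+ *	dev_pm_opp_remove_all_dynamic(dev);
+ */
+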
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
int count, supply_size;
/* Allocate space for at least one supply */
- count = table->regulator_count ? table->regulator_count : 1;
+ count = table->regulator_count > 0 ? table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * count;
/* allocate new OPP node and supplies structures */
@@ -976,6 +1263,9 @@
struct regulator *reg;
int i;
+ if (!opp_table->regulators)
+ return true;
+
for (i = 0; i < opp_table->regulator_count; i++) {
reg = opp_table->regulators[i];
@@ -1062,13 +1352,7 @@
new_opp->opp_table = opp_table;
kref_init(&new_opp->kref);
- /* Get a reference to the OPP table */
- _get_opp_table_kref(opp_table);
-
- ret = opp_debug_create_one(new_opp, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
- __func__, ret);
+ opp_debug_create_one(new_opp, opp_table);
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
@@ -1263,7 +1547,7 @@
struct dev_pm_set_opp_data *data;
int len, count = opp_table->regulator_count;
- if (WARN_ON(!count))
+ if (WARN_ON(!opp_table->regulators))
return -EINVAL;
/* space for set_opp_data */
@@ -1360,7 +1644,7 @@
kfree(opp_table->regulators);
opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
+ opp_table->regulator_count = -1;
err:
dev_pm_opp_put_opp_table(opp_table);
@@ -1389,7 +1673,7 @@
kfree(opp_table->regulators);
opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
+ opp_table->regulator_count = -1;
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
@@ -1516,6 +1800,201 @@
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+static void _opp_detach_genpd(struct opp_table *opp_table)
+{
+ int index;
+
+ for (index = 0; index < opp_table->required_opp_count; index++) {
+ if (!opp_table->genpd_virt_devs[index])
+ continue;
+
+ dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
+ opp_table->genpd_virt_devs[index] = NULL;
+ }
+
+ kfree(opp_table->genpd_virt_devs);
+ opp_table->genpd_virt_devs = NULL;
+}
+
+/**
+ * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * @dev: Consumer device for which the genpd is getting attached.
+ * @names: Null terminated array of pointers containing names of genpd to attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
+ *
+ * Multiple generic power domains for a device are supported with the help of
+ * virtual genpd devices, which are created for each consumer device - genpd
+ * pair. These are the device structures which are attached to the power domain
+ * and are required by the OPP core to set the performance state of the genpd.
+ * The same API also works for the case where a single genpd is available and
+ * so we don't need to support that separately.
+ * we don't need to support that separately.
+ *
+ * This helper will normally be called by the consumer driver of the device
+ * "dev", as only that has details of the genpd names.
+ *
+ * This helper needs to be called once with a list of all genpds to attach.
+ * Otherwise the original device structure will be used instead by the OPP core.
+ *
+ * The order of entries in the names array must match the order in which
+ * "required-opps" are added in DT.
+ */
+struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
+ const char **names, struct device ***virt_devs)
+{
+ struct opp_table *opp_table;
+ struct device *virt_dev;
+ int index = 0, ret = -EINVAL;
+ const char **name = names;
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * If the genpd's OPP table isn't already initialized, parsing of the
+	 * required-opps fails for dev. We should retry this after the genpd's
+	 * OPP table is added.
+ */
+ if (!opp_table->required_opp_count) {
+ ret = -EPROBE_DEFER;
+ goto put_table;
+ }
+
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+ opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
+ sizeof(*opp_table->genpd_virt_devs),
+ GFP_KERNEL);
+ if (!opp_table->genpd_virt_devs)
+ goto unlock;
+
+ while (*name) {
+ if (index >= opp_table->required_opp_count) {
+ dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
+ *name, opp_table->required_opp_count, index);
+ goto err;
+ }
+
+ if (opp_table->genpd_virt_devs[index]) {
+ dev_err(dev, "Genpd virtual device already set %s\n",
+ *name);
+ goto err;
+ }
+
+ virt_dev = dev_pm_domain_attach_by_name(dev, *name);
+ if (IS_ERR(virt_dev)) {
+ ret = PTR_ERR(virt_dev);
+ dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
+ goto err;
+ }
+
+ opp_table->genpd_virt_devs[index] = virt_dev;
+ index++;
+ name++;
+ }
+
+ if (virt_devs)
+ *virt_devs = opp_table->genpd_virt_devs;
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ return opp_table;
+
+err:
+ _opp_detach_genpd(opp_table);
+unlock:
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+put_table:
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
+
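+/*
+ * Example (illustrative sketch only): attaching two hypothetical power
+ * domains named "cx" and "mx"; the names must follow the order of the
+ * "required-opps" entries in DT.
+ *
+ *	const char *names[] = { "cx", "mx", NULL };
+ *	struct device **virt_devs;
+ *	struct opp_table *opp_table;
+ *
+ *	opp_table = dev_pm_opp_attach_genpd(dev, names, &virt_devs);
+ *	if (IS_ERR(opp_table))
+ *		return PTR_ERR(opp_table);
+ *	...
+ *	dev_pm_opp_detach_genpd(opp_table);
+ */
+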
+/**
+ * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
+ *
+ * This detaches the genpd(s), resets the virtual device pointers, and puts the
+ * OPP table.
+ */
+void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
+{
+ /*
+ * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
+ * used in parallel.
+ */
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+ _opp_detach_genpd(opp_table);
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
+
+/**
+ * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
+ * @src_table: OPP table which has dst_table as one of its required OPP table.
+ * @dst_table: Required OPP table of the src_table.
+ * @pstate: Current performance state of the src_table.
+ *
+ * This returns the pstate of the OPP (present in @dst_table) pointed to by the
+ * "required-opps" property of the OPP (present in @src_table) which has
+ * performance state set to @pstate.
+ *
+ * Return: Zero or positive performance state on success, otherwise negative
+ * value on errors.
+ */
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
+ struct opp_table *dst_table,
+ unsigned int pstate)
+{
+ struct dev_pm_opp *opp;
+ int dest_pstate = -EINVAL;
+ int i;
+
+ if (!pstate)
+ return 0;
+
+ /*
+	 * Normally the src_table will have the "required-opps" property set to
+	 * point to one of the OPPs in the dst_table, but in some cases the
+	 * genpd and its master have a one-to-one mapping of performance states
+	 * and so none of them have the "required-opps" property set. Return the
+ * pstate of the src_table as it is in such cases.
+ */
+ if (!src_table->required_opp_count)
+ return pstate;
+
+ for (i = 0; i < src_table->required_opp_count; i++) {
+ if (src_table->required_opp_tables[i]->np == dst_table->np)
+ break;
+ }
+
+ if (unlikely(i == src_table->required_opp_count)) {
+ pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
+ __func__, src_table, dst_table);
+ return -EINVAL;
+ }
+
+ mutex_lock(&src_table->lock);
+
+ list_for_each_entry(opp, &src_table->opp_list, node) {
+ if (opp->pstate == pstate) {
+ dest_pstate = opp->required_opps[i]->pstate;
+ goto unlock;
+ }
+ }
+
+ pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
+ dst_table);
+
+unlock:
+ mutex_unlock(&src_table->lock);
+
+ return dest_pstate;
+}
+
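+/*
+ * Example (illustrative sketch only): a genpd provider translating one of its
+ * own performance states into the state of the master domain whose OPP table
+ * is referenced by its "required-opps"; both table pointers are assumed to be
+ * held by the caller.
+ *
+ *	dst_pstate = dev_pm_opp_xlate_performance_state(src_table, dst_table,
+ *							 pstate);
+ *	if (dst_pstate < 0)
+ *		return dst_pstate;
+ */
+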
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
@@ -1542,9 +2021,13 @@
if (!opp_table)
return -ENOMEM;
- ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
+ /* Fix regulator count for dynamic OPPs */
+ opp_table->regulator_count = 1;
- dev_pm_opp_put_opp_table(opp_table);
+ ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
+
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
@@ -1707,35 +2190,7 @@
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
-/*
- * Free OPPs either created using static entries present in DT or even the
- * dynamically added entries based on remove_all param.
- */
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
- bool remove_all)
-{
- struct dev_pm_opp *opp, *tmp;
-
- /* Find if opp_table manages a single device */
- if (list_is_singular(&opp_table->dev_list)) {
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (remove_all || !opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- /*
- * The OPP table is getting removed, drop the performance state
- * constraints.
- */
- if (opp_table->genpd_performance_state)
- dev_pm_genpd_set_performance_state(dev, 0);
- } else {
- _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
- }
-}
-
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
+void _dev_pm_opp_find_and_remove_table(struct device *dev)
{
struct opp_table *opp_table;
@@ -1752,8 +2207,12 @@
return;
}
- _dev_pm_opp_remove_table(opp_table, dev, remove_all);
+ _put_opp_list_kref(opp_table);
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ /* Drop reference taken while the OPP table was added */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1766,6 +2225,6 @@
*/
void dev_pm_opp_remove_table(struct device *dev)
{
- _dev_pm_opp_find_and_remove_table(dev, true);
+ _dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 0c09107..b5055cc 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic OPP helper interface for CPU device
*
@@ -5,10 +6,6 @@
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -108,7 +105,8 @@
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
+ int last_cpu)
{
struct device *cpu_dev;
int cpu;
@@ -116,6 +114,9 @@
WARN_ON(cpumask_empty(cpumask));
for_each_cpu(cpu, cpumask) {
+ if (cpu == last_cpu)
+ break;
+
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
@@ -123,10 +124,7 @@
continue;
}
- if (of)
- dev_pm_opp_of_remove_table(cpu_dev);
- else
- dev_pm_opp_remove_table(cpu_dev);
+ _dev_pm_opp_find_and_remove_table(cpu_dev);
}
}
@@ -140,7 +138,7 @@
*/
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
- _dev_pm_opp_cpumask_remove_table(cpumask, false);
+ _dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
@@ -222,8 +220,10 @@
cpumask_clear(cpumask);
if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+ mutex_lock(&opp_table->lock);
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
cpumask_set_cpu(opp_dev->dev->id, cpumask);
+ mutex_unlock(&opp_table->lock);
} else {
cpumask_set_cpu(cpu_dev->id, cpumask);
}
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index e6828e5..609665e 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic OPP debugfs interface
*
* Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -35,7 +32,7 @@
debugfs_remove_recursive(opp->dentry);
}
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
{
@@ -50,30 +47,21 @@
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return false;
+ debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt);
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
- &opp->supplies[i].u_volt))
- return false;
+ debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min);
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
- &opp->supplies[i].u_volt_min))
- return false;
+ debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max);
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
- &opp->supplies[i].u_volt_max))
- return false;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
- &opp->supplies[i].u_amp))
- return false;
+ debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp);
}
-
- return true;
}
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
@@ -95,40 +83,23 @@
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return -ENOMEM;
- if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
- return -ENOMEM;
+ debugfs_create_bool("available", S_IRUGO, d, &opp->available);
+ debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
+ debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
+ debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+ debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+ debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
+ debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns);
- if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
- return -ENOMEM;
-
- if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
- return -ENOMEM;
-
- if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
- return -ENOMEM;
-
- if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
- return -ENOMEM;
-
- if (!opp_debug_create_supplies(opp, opp_table, d))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
- &opp->clock_latency_ns))
- return -ENOMEM;
+ opp_debug_create_supplies(opp, opp_table, d);
opp->dentry = d;
- return 0;
}
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
const struct device *dev = opp_dev->dev;
struct dentry *d;
@@ -137,36 +108,21 @@
/* Create device specific directory */
d = debugfs_create_dir(opp_table->dentry_name, rootdir);
- if (!d) {
- dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
- return -ENOMEM;
- }
opp_dev->dentry = d;
opp_table->dentry = d;
-
- return 0;
}
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = opp_dev->dev;
char name[NAME_MAX];
- struct dentry *d;
opp_set_dev_name(opp_dev->dev, name);
/* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
- if (!d) {
- dev_err(dev, "%s: Failed to create link\n", __func__);
- return -ENOMEM;
- }
-
- opp_dev->dentry = d;
-
- return 0;
+ opp_dev->dentry = debugfs_create_symlink(name, rootdir,
+ opp_table->dentry_name);
}
/**
@@ -177,20 +133,13 @@
* Dynamically adds device specific directory in debugfs 'opp' directory. If the
* device-opp is shared with other devices, then links will be created for all
* devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
*/
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
{
- if (!rootdir) {
- pr_debug("%s: Uninitialized rootdir\n", __func__);
- return -EINVAL;
- }
-
if (opp_table->dentry)
- return opp_list_debug_create_link(opp_dev, opp_table);
-
- return opp_list_debug_create_dir(opp_dev, opp_table);
+ opp_list_debug_create_link(opp_dev, opp_table);
+ else
+ opp_list_debug_create_dir(opp_dev, opp_table);
}
static void opp_migrate_dentry(struct opp_device *opp_dev,
@@ -252,10 +201,6 @@
{
/* Create /sys/kernel/debug/opp directory */
rootdir = debugfs_create_dir("opp", NULL);
- if (!rootdir) {
- pr_err("%s: Failed to create root directory\n", __func__);
- return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 20988c4..1cbb582 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic OPP OF helpers
*
@@ -5,10 +6,6 @@
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -20,14 +17,36 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/energy_model.h>
#include "opp.h"
-static struct opp_table *_managed_opp(const struct device_node *np)
+/*
+ * Returns opp descriptor node for a device node, caller must
+ * do of_node_put().
+ */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
+ int index)
+{
+ /* "operating-points-v2" can be an array for power domain providers */
+ return of_parse_phandle(np, "operating-points-v2", index);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return _opp_of_get_opp_desc_node(dev->of_node, 0);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
+
+struct opp_table *_managed_opp(struct device *dev, int index)
{
struct opp_table *opp_table, *managed_table = NULL;
+ struct device_node *np;
- mutex_lock(&opp_table_lock);
+ np = _opp_of_get_opp_desc_node(dev->of_node, index);
+ if (!np)
+ return NULL;
list_for_each_entry(opp_table, &opp_tables, node) {
if (opp_table->np == np) {
@@ -47,29 +66,270 @@
}
}
- mutex_unlock(&opp_table_lock);
+ of_node_put(np);
return managed_table;
}
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+/* The caller must call dev_pm_opp_put() after the OPP is used */
+static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
+ struct device_node *opp_np)
{
- struct device_node *np;
+ struct dev_pm_opp *opp;
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ if (opp->np == opp_np) {
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ return opp;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return NULL;
+}
+
+static struct device_node *of_parse_required_opp(struct device_node *np,
+ int index)
+{
+ struct device_node *required_np;
+
+ required_np = of_parse_phandle(np, "required-opps", index);
+ if (unlikely(!required_np)) {
+ pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
+ __func__, np, index);
+ }
+
+ return required_np;
+}
+
+/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
+static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
+{
+ struct opp_table *opp_table;
+ struct device_node *opp_table_np;
+
+ lockdep_assert_held(&opp_table_lock);
+
+ opp_table_np = of_get_parent(opp_np);
+ if (!opp_table_np)
+ goto err;
+
+ /* It is safe to put the node now as all we need now is its address */
+ of_node_put(opp_table_np);
+
+ list_for_each_entry(opp_table, &opp_tables, node) {
+ if (opp_table_np == opp_table->np) {
+ _get_opp_table_kref(opp_table);
+ return opp_table;
+ }
+ }
+
+err:
+ return ERR_PTR(-ENODEV);
+}
+
+/* Free resources previously acquired by _opp_table_alloc_required_tables() */
+static void _opp_table_free_required_tables(struct opp_table *opp_table)
+{
+ struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+ int i;
+
+ if (!required_opp_tables)
+ return;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ if (IS_ERR_OR_NULL(required_opp_tables[i]))
+ break;
+
+ dev_pm_opp_put_opp_table(required_opp_tables[i]);
+ }
+
+ kfree(required_opp_tables);
+
+ opp_table->required_opp_count = 0;
+ opp_table->required_opp_tables = NULL;
+}
+
+/*
+ * Populate all devices and opp tables which are part of "required-opps" list.
+ * Checking only the first OPP node should be enough.
+ */
+static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
+ struct device *dev,
+ struct device_node *opp_np)
+{
+ struct opp_table **required_opp_tables;
+ struct device_node *required_np, *np;
+ int count, i;
+
+ /* Traversing the first OPP node is all we need */
+ np = of_get_next_available_child(opp_np, NULL);
+ if (!np) {
+ dev_err(dev, "Empty OPP table\n");
+ return;
+ }
+
+ count = of_count_phandle_with_args(np, "required-opps", NULL);
+ if (!count)
+ goto put_np;
+
+ required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
+ GFP_KERNEL);
+ if (!required_opp_tables)
+ goto put_np;
+
+ opp_table->required_opp_tables = required_opp_tables;
+ opp_table->required_opp_count = count;
+
+ for (i = 0; i < count; i++) {
+ required_np = of_parse_required_opp(np, i);
+ if (!required_np)
+ goto free_required_tables;
+
+ required_opp_tables[i] = _find_table_of_opp_np(required_np);
+ of_node_put(required_np);
+
+ if (IS_ERR(required_opp_tables[i]))
+ goto free_required_tables;
+
+ /*
+ * We only support genpd's OPPs in the "required-opps" for now,
+		 * as we don't know much about other cases. Error out if the
+ * required OPP doesn't belong to a genpd.
+ */
+ if (!required_opp_tables[i]->is_genpd) {
+ dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
+ required_np);
+ goto free_required_tables;
+ }
+ }
+
+ goto put_np;
+
+free_required_tables:
+ _opp_table_free_required_tables(opp_table);
+put_np:
+ of_node_put(np);
+}
+
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
+ int index)
+{
+ struct device_node *np, *opp_np;
+ u32 val;
/*
* Only required for backward compatibility with v1 bindings, but isn't
* harmful for other cases. And so we do it unconditionally.
*/
np = of_node_get(dev->of_node);
- if (np) {
- u32 val;
+ if (!np)
+ return;
- if (!of_property_read_u32(np, "clock-latency", &val))
- opp_table->clock_latency_ns_max = val;
- of_property_read_u32(np, "voltage-tolerance",
- &opp_table->voltage_tolerance_v1);
- of_node_put(np);
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+
+ if (of_find_property(np, "#power-domain-cells", NULL))
+ opp_table->is_genpd = true;
+
+ /* Get OPP table node */
+ opp_np = _opp_of_get_opp_desc_node(np, index);
+ of_node_put(np);
+
+ if (!opp_np)
+ return;
+
+ if (of_property_read_bool(opp_np, "opp-shared"))
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+ else
+ opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+
+ opp_table->np = opp_np;
+
+ _opp_table_alloc_required_tables(opp_table, dev, opp_np);
+ of_node_put(opp_np);
+}
+
+void _of_clear_opp_table(struct opp_table *opp_table)
+{
+ _opp_table_free_required_tables(opp_table);
+}
+
+/*
+ * Release all resources previously acquired with a call to
+ * _of_opp_alloc_required_opps().
+ */
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp **required_opps = opp->required_opps;
+ int i;
+
+ if (!required_opps)
+ return;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ if (!required_opps[i])
+ break;
+
+ /* Put the reference back */
+ dev_pm_opp_put(required_opps[i]);
}
+
+ kfree(required_opps);
+ opp->required_opps = NULL;
+}
+
+/* Populate all required OPPs which are part of "required-opps" list */
+static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp **required_opps;
+ struct opp_table *required_table;
+ struct device_node *np;
+ int i, ret, count = opp_table->required_opp_count;
+
+ if (!count)
+ return 0;
+
+ required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
+ if (!required_opps)
+ return -ENOMEM;
+
+ opp->required_opps = required_opps;
+
+ for (i = 0; i < count; i++) {
+ required_table = opp_table->required_opp_tables[i];
+
+ np = of_parse_required_opp(opp->np, i);
+ if (unlikely(!np)) {
+ ret = -ENODEV;
+ goto free_required_opps;
+ }
+
+ required_opps[i] = _find_opp_of_np(required_table, np);
+ of_node_put(np);
+
+ if (!required_opps[i]) {
+ pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+ __func__, opp->np, i);
+ ret = -ENODEV;
+ goto free_required_opps;
+ }
+ }
+
+ return 0;
+
+free_required_opps:
+ _of_opp_free_required_opps(opp_table, opp);
+
+ return ret;
}
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
@@ -113,12 +373,10 @@
struct opp_table *opp_table)
{
u32 *microvolt, *microamp = NULL;
- int supplies, vcount, icount, ret, i, j;
+ int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
- supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
-
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -133,7 +391,13 @@
/* Missing property isn't a problem, but an invalid entry is */
if (!prop) {
- if (!opp_table->regulator_count)
+ if (unlikely(supplies == -1)) {
+ /* Initialize regulator_count */
+ opp_table->regulator_count = 0;
+ return 0;
+ }
+
+ if (!supplies)
return 0;
dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
@@ -142,6 +406,14 @@
}
}
+ if (unlikely(supplies == -1)) {
+ /* Initialize regulator_count */
+ supplies = opp_table->regulator_count = 1;
+ } else if (unlikely(!supplies)) {
+ dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
+ return -EINVAL;
+ }
+
vcount = of_property_count_u32_elems(opp->np, name);
if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
@@ -245,26 +517,10 @@
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- _dev_pm_opp_find_and_remove_table(dev, false);
+ _dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device node, caller must
- * do of_node_put() */
-static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
- int index)
-{
- /* "operating-points-v2" can be an array for power domain providers */
- return of_parse_phandle(np, "operating-points-v2", index);
-}
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
-{
- return _opp_of_get_opp_desc_node(dev->of_node, 0);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
-
/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @opp_table: OPP table
@@ -276,15 +532,21 @@
* removed by dev_pm_opp_remove.
*
* Return:
- * 0 On success OR
+ * Valid OPP pointer:
+ * On success
+ * NULL:
* Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
+ * OR if the OPP is not supported by hardware.
+ * ERR_PTR(-EEXIST):
+ * Freq are same and volt are different OR
* Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -EINVAL Failed parsing the OPP node
+ * ERR_PTR(-ENOMEM):
+ * Memory allocation failure
+ * ERR_PTR(-EINVAL):
+ * Failed parsing the OPP node
*/
-static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
- struct device_node *np)
+static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ struct device *dev, struct device_node *np)
{
struct dev_pm_opp *new_opp;
u64 rate = 0;
@@ -294,13 +556,12 @@
new_opp = _opp_allocate(opp_table);
if (!new_opp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = of_property_read_u64(np, "opp-hz", &rate);
if (ret < 0) {
/* "opp-hz" is optional for devices like power domains. */
- if (!of_find_property(dev->of_node, "#power-domain-cells",
- NULL)) {
+ if (!opp_table->is_genpd) {
dev_err(dev, "%s: opp-hz not found\n", __func__);
goto free_opp;
}
@@ -315,6 +576,8 @@
new_opp->rate = (unsigned long)rate;
}
+ of_property_read_u32(np, "opp-level", &new_opp->level);
+
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
@@ -327,29 +590,37 @@
new_opp->dynamic = false;
new_opp->available = true;
+ ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+ if (ret)
+ goto free_opp;
+
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
-
ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
- goto free_opp;
+ goto free_required_opps;
+
+ if (opp_table->is_genpd)
+ new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
ret = 0;
- goto free_opp;
+ goto free_required_opps;
}
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, opp_table->suspend_opp->rate,
- new_opp->rate);
+ /* Pick the OPP with higher rate as suspend OPP */
+ if (new_opp->rate > opp_table->suspend_opp->rate) {
+ opp_table->suspend_opp->suspend = false;
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
} else {
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -369,53 +640,53 @@
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
- return 0;
+ return new_opp;
+free_required_opps:
+ _of_opp_free_required_opps(opp_table, new_opp);
free_opp:
_opp_free(new_opp);
- return ret;
+ return ERR_PTR(ret);
}
/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
struct device_node *np;
- struct opp_table *opp_table;
- int ret = 0, count = 0, pstate_count = 0;
+ int ret, count = 0, pstate_count = 0;
struct dev_pm_opp *opp;
- opp_table = _managed_opp(opp_np);
- if (opp_table) {
- /* OPPs are already managed */
- if (!_add_opp_dev(dev, opp_table))
- ret = -ENOMEM;
- goto put_opp_table;
+ /* OPP table is already initialized for the device */
+ if (opp_table->parsed_static_opps) {
+ kref_get(&opp_table->list_kref);
+ return 0;
}
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
+ /*
+ * Re-initialize list_kref every time we add static OPPs to the OPP
+	 * table as the reference count may be 0 after the last time static OPPs
+ * were removed.
+ */
+ kref_init(&opp_table->list_kref);
/* We have opp-table node now, iterate over it and add OPPs */
- for_each_available_child_of_node(opp_np, np) {
- count++;
-
- ret = _opp_add_static_v2(opp_table, dev, np);
- if (ret) {
+ for_each_available_child_of_node(opp_table->np, np) {
+ opp = _opp_add_static_v2(opp_table, dev, np);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
of_node_put(np);
- goto put_opp_table;
+ return ret;
+ } else if (opp) {
+ count++;
}
}
/* There should be one or more OPPs defined */
- if (WARN_ON(!count)) {
- ret = -ENOENT;
- goto put_opp_table;
- }
+ if (WARN_ON(!count))
+ return -ENOENT;
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
@@ -424,30 +695,20 @@
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- ret = -ENOENT;
- _dev_pm_opp_remove_table(opp_table, dev, false);
- goto put_opp_table;
+ return -ENOENT;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->np = opp_np;
- if (of_property_read_bool(opp_np, "opp-shared"))
- opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
- else
- opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+ opp_table->parsed_static_opps = true;
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return 0;
}
/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
+static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
- struct opp_table *opp_table;
const struct property *prop;
const __be32 *val;
int nr, ret = 0;
@@ -468,10 +729,6 @@
return -EINVAL;
}
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
-
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
@@ -481,13 +738,11 @@
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
- break;
+ return ret;
}
nr -= 2;
}
- dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -510,24 +765,24 @@
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- struct device_node *opp_np;
+ struct opp_table *opp_table;
int ret;
- /*
- * OPPs have two version of bindings now. The older one is deprecated,
- * try for the new binding first.
- */
- opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
- if (!opp_np) {
- /*
- * Try old-deprecated bindings for backward compatibility with
- * older dtbs.
- */
- return _of_add_opp_table_v1(dev);
- }
+ opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+ if (!opp_table)
+ return -ENOMEM;
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
+ /*
+	 * OPPs have two versions of bindings now. Also try the old (v1)
+ * bindings for backward compatibility with older dtbs.
+ */
+ if (opp_table->np)
+ ret = _of_add_opp_table_v2(dev, opp_table);
+ else
+ ret = _of_add_opp_table_v1(dev, opp_table);
+
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -554,28 +809,27 @@
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- struct device_node *opp_np;
+ struct opp_table *opp_table;
int ret, count;
-again:
- opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
- if (!opp_np) {
+ if (index) {
/*
* If only one phandle is present, then the same OPP table
* applies for all index requests.
*/
count = of_count_phandle_with_args(dev->of_node,
"operating-points-v2", NULL);
- if (count == 1 && index) {
+ if (count == 1)
index = 0;
- goto again;
- }
-
- return -ENODEV;
}
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
+ opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+ if (!opp_table)
+ return -ENOMEM;
+
+ ret = _of_add_opp_table_v2(dev, opp_table);
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
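As the code above shows, an index is only honoured when the device lists more than one operating-points-v2 phandle; with a single phandle every index request collapses to table 0. An illustrative call site, assuming a made-up DT fragment with two tables:

	/*
	 * Assumed DT fragment:
	 *     operating-points-v2 = <&perf_opp_table>, <&domain_opp_table>;
	 */
	ret = dev_pm_opp_of_add_table_indexed(dev, 1);
	if (ret)
		dev_err(dev, "failed to add OPP table at index 1: %d\n", ret);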
@@ -592,7 +846,7 @@
*/
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
- _dev_pm_opp_cpumask_remove_table(cpumask, true);
+ _dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
@@ -605,16 +859,18 @@
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
struct device *cpu_dev;
- int cpu, ret = 0;
+ int cpu, ret;
- WARN_ON(cpumask_empty(cpumask));
+ if (WARN_ON(cpumask_empty(cpumask)))
+ return -ENODEV;
for_each_cpu(cpu, cpumask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
cpu);
- continue;
+ ret = -ENODEV;
+ goto remove_table;
}
ret = dev_pm_opp_of_add_table(cpu_dev);
@@ -626,12 +882,16 @@
pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
__func__, cpu, ret);
- /* Free all other OPPs */
- dev_pm_opp_of_cpumask_remove_table(cpumask);
- break;
+ goto remove_table;
}
}
+ return 0;
+
+remove_table:
+ /* Free all other OPPs */
+ _dev_pm_opp_cpumask_remove_table(cpumask, cpu);
+
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
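The rework above also changes the failure semantics: instead of tearing down tables for the whole cpumask, the error path now unwinds only the CPUs processed so far (up to and including the failing one) and returns an error rather than silently skipping a missing CPU device. A hedged sketch of cpufreq-style usage, where policy and cpu_dev stand in for state the caller already holds:

	/* Populate OPP tables for every CPU that shares this policy. */
	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
	if (ret) {
		/* Tables added for earlier CPUs were already freed on the way out. */
		dev_warn(cpu_dev, "no OPP tables for policy CPUs: %d\n", ret);
		return ret;
	}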
@@ -707,58 +967,48 @@
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
/**
- * of_dev_pm_opp_find_required_opp() - Search for required OPP.
- * @dev: The device whose OPP node is referenced by the 'np' DT node.
+ * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
* @np: Node that contains the "required-opps" property.
+ * @index: Index of the phandle to parse.
*
- * Returns the OPP of the device 'dev', whose phandle is present in the "np"
- * node. Although the "required-opps" property supports having multiple
- * phandles, this helper routine only parses the very first phandle in the list.
+ * Returns the performance state of the OPP pointed to by the "required-opps"
+ * property at @index in @np.
*
- * Return: Matching opp, else returns ERR_PTR in case of error and should be
- * handled using IS_ERR.
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
+ * Return: Zero or positive performance state on success, otherwise negative
+ * value on errors.
*/
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
- struct device_node *np)
+int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *opp;
struct device_node *required_np;
struct opp_table *opp_table;
+ int pstate = -EINVAL;
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
+ required_np = of_parse_required_opp(np, index);
+ if (!required_np)
+ return -EINVAL;
- required_np = of_parse_phandle(np, "required-opps", 0);
- if (unlikely(!required_np)) {
- dev_err(dev, "Unable to parse required-opps\n");
- goto put_opp_table;
+ opp_table = _find_table_of_opp_np(required_np);
+ if (IS_ERR(opp_table)) {
+ pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
+ __func__, np, PTR_ERR(opp_table));
+ goto put_required_np;
}
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->np == required_np) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
+ opp = _find_opp_of_np(opp_table, required_np);
+ if (opp) {
+ pstate = opp->pstate;
+ dev_pm_opp_put(opp);
}
- mutex_unlock(&opp_table->lock);
-
- of_node_put(required_np);
-put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
- return opp;
+put_required_np:
+ of_node_put(required_np);
+
+ return pstate;
}
-EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
+EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
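Since the replacement helper hands back a plain performance state rather than an OPP reference, callers no longer have to drop an OPP refcount afterwards. A hedged sketch of how a driver might map one of its OPP nodes onto a genpd performance-state request; foo_set_opp() is hypothetical, while dev_pm_genpd_set_performance_state() is the existing genpd helper:

static int foo_set_opp(struct device *dev, struct device_node *opp_np)
{
	int pstate;

	/* Phandle 0 of "required-opps" points into the power domain's OPP table. */
	pstate = of_get_required_opp_performance_state(opp_np, 0);
	if (pstate < 0)
		return pstate;

	return dev_pm_genpd_set_performance_state(dev, pstate);
}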
/**
* dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
@@ -778,3 +1028,101 @@
return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the estimated power consumed by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+ int cpu)
+{
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long mV, Hz;
+ u32 cap;
+ u64 tmp;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret)
+ return -EINVAL;
+
+ Hz = *kHz * 1000;
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+ if (IS_ERR(opp))
+ return -EINVAL;
+
+ mV = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ if (!mV)
+ return -EINVAL;
+
+ tmp = (u64)cap * mV * mV * (Hz / 1000000);
+ do_div(tmp, 1000000000);
+
+ *mW = (unsigned long)tmp;
+ *kHz = Hz / 1000;
+
+ return 0;
+}
+
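Working the fixed-point arithmetic through with illustrative round numbers: a dynamic-power-coefficient of 100 and an OPP at 1 GHz / 1000 mV gives tmp = 100 * 1000 * 1000 * 1000 = 10^11, and do_div(tmp, 1000000000) leaves an estimate of *mW = 100 with *kHz updated to 1000000. The figures are made up purely to show the scaling; real coefficients come from the devicetree.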
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus : CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+ int ret, nr_opp, cpu = cpumask_first(cpus);
+ struct device *cpu_dev;
+ struct device_node *np;
+ u32 cap;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return;
+
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0)
+ return;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return;
+
+ /*
+ * Register an EM only if the 'dynamic-power-coefficient' property is
+ * set in devicetree. It is assumed the voltage values are known if that
+ * property is set since it is useless otherwise. If voltages are not
+ * known, just let the EM registration fail with an error to alert the
+ * user about the inconsistent configuration.
+ */
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret || !cap)
+ return;
+
+ em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
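Because the helper returns quietly when the property is missing or the OPP count is not yet known, callers can invoke it unconditionally once their OPPs are populated. A hedged sketch of a cpufreq-style init path; the foo_cpufreq_init() shell is hypothetical, the call itself matches the signature above:

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... fill policy->cpus, policy->freq_table, clocks, etc. ... */

	/* Best effort: registers an EM only if DT provides the coefficient. */
	dev_pm_opp_of_register_em(policy->cpus);

	return 0;
}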
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 7c540fd..01a500e 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic OPP Interface
*
@@ -5,10 +6,6 @@
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVER_OPP_H__
@@ -60,9 +57,11 @@
* @suspend: true if suspend OPP
* @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
+ * @level: Performance level
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
+ * @required_opps: List of OPPs that are required by this OPP.
* @opp_table: points back to the opp_table struct this opp belongs to
* @np: OPP's device node.
* @dentry: debugfs dentry pointer (per opp)
@@ -79,11 +78,13 @@
bool suspend;
unsigned int pstate;
unsigned long rate;
+ unsigned int level;
struct dev_pm_opp_supply *supplies;
unsigned long clock_latency_ns;
+ struct dev_pm_opp **required_opps;
struct opp_table *opp_table;
struct device_node *np;
@@ -126,18 +127,28 @@
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @lock: mutex protecting the opp_list.
+ * @list_kref: for reference count of the OPP list.
+ * @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
+ * @parsed_static_opps: True if OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
+ * @genpd_virt_devs: List of virtual devices for multiple genpd support.
+ * @required_opp_tables: List of device OPP tables that are required by OPPs in
+ * this table.
+ * @required_opp_count: Number of required OPP tables.
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
* @regulators: Supply regulators
- * @regulator_count: Number of power supply regulators
+ * @regulator_count: Number of power supply regulators. Its value can be -1
+ * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
+ * property).
* @genpd_performance_state: True if the device's power domain supports performance states.
+ * @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
* @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
@@ -156,6 +167,7 @@
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
+ struct kref list_kref;
struct mutex lock;
struct device_node *np;
@@ -164,16 +176,23 @@
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
+ bool parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
+ struct mutex genpd_virt_dev_lock;
+ struct device **genpd_virt_devs;
+ struct opp_table **required_opp_tables;
+ unsigned int required_opp_count;
+
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
struct clk *clk;
struct regulator **regulators;
- unsigned int regulator_count;
+ int regulator_count;
bool genpd_performance_state;
+ bool is_genpd;
int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_set_opp_data *set_opp_data;
@@ -186,39 +205,47 @@
/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
+void _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
+void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table(struct device *dev);
+void _put_opp_list_kref(struct opp_table *opp_table);
#ifdef CONFIG_OF
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
+void _of_clear_opp_table(struct opp_table *opp_table);
+struct opp_table *_managed_opp(struct device *dev, int index);
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp);
#else
-static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
+static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
+static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
+static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
#else
static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
-{ return 0; }
-static inline int opp_debug_register(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{ return 0; }
+static inline void opp_debug_create_one(struct dev_pm_opp *opp,
+ struct opp_table *opp_table) { }
+
+static inline void opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table) { }
static inline void opp_debug_unregister(struct opp_device *opp_dev,
struct opp_table *opp_table)
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 3f4fb4d..1c69c40 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -417,7 +417,6 @@
.probe = ti_opp_supply_probe,
.driver = {
.name = "ti_opp_supply",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ti_opp_supply_of_match),
},
};