Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
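
Among the changes pulled in by this update is a usage-count fix in
clk_pm_runtime_get(): pm_runtime_get_sync() raises the device usage count
even when it returns an error, so the error path has to drop that
reference with pm_runtime_put_noidle(). A minimal sketch of the pattern
outside the clk framework (the helper name and its caller are illustrative
only, not part of the patch):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical helper: resume @dev while keeping the usage count balanced. */
    static int example_runtime_resume_get(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* The usage count was bumped even though resume failed. */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            return 0;
    }

Callers that get 0 back are expected to balance the count later with a
pm_runtime put call, which the clk core does in clk_pm_runtime_put().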
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1c677d7..6ff87cd 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -114,7 +114,11 @@
return 0;
ret = pm_runtime_get_sync(core->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(core->dev);
+ return ret;
+ }
+ return 0;
}
static void clk_pm_runtime_put(struct clk_core *core)
@@ -2642,12 +2646,14 @@
{
int ret;
- clk_prepare_lock();
+ lockdep_assert_held(&prepare_lock);
+ if (!core->ops->get_phase)
+ return 0;
+
/* Always try to update cached phase if possible */
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- ret = core->phase;
- clk_prepare_unlock();
+ ret = core->ops->get_phase(core->hw);
+ if (ret >= 0)
+ core->phase = ret;
return ret;
}
@@ -2661,10 +2667,16 @@
*/
int clk_get_phase(struct clk *clk)
{
+ int ret;
+
if (!clk)
return 0;
- return clk_core_get_phase(clk->core);
+ clk_prepare_lock();
+ ret = clk_core_get_phase(clk->core);
+ clk_prepare_unlock();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);
@@ -2878,13 +2890,21 @@
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
+ int phase;
+
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
- clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c),
- clk_core_get_scaled_duty_cycle(c, 100000));
+ clk_core_get_rate(c), clk_core_get_accuracy(c));
+
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "%5d", phase);
+ else
+ seq_puts(s, "-----");
+
+ seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2921,6 +2941,7 @@
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
+ int phase;
unsigned long min_rate, max_rate;
clk_core_get_boundaries(c, &min_rate, &max_rate);
@@ -2934,7 +2955,9 @@
seq_printf(s, "\"min_rate\": %lu,", min_rate);
seq_printf(s, "\"max_rate\": %lu,", max_rate);
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "\"phase\": %d,", phase);
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
@@ -3231,6 +3254,34 @@
}
#endif
+static void clk_core_reparent_orphans_nolock(void)
+{
+ struct clk_core *orphan;
+ struct hlist_node *tmp2;
+
+ /*
+ * walk the list of orphan clocks and reparent any that have newly
+ * found a parent.
+ */
+ hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
+ struct clk_core *parent = __clk_init_parent(orphan);
+
+ /*
+ * We need to use __clk_set_parent_before() and _after() to
+ * properly migrate any prepare/enable count of the orphan
+ * clock. This is important for CLK_IS_CRITICAL clocks, which
+ * are enabled during init but might not have a parent yet.
+ */
+ if (parent) {
+ /* update the clk tree topology */
+ __clk_set_parent_before(orphan, parent);
+ __clk_set_parent_after(orphan, parent, NULL);
+ __clk_recalc_accuracies(orphan);
+ __clk_recalc_rates(orphan, 0);
+ }
+ }
+}
+
/**
* __clk_core_init - initialize the data structures in a struct clk_core
* @core: clk_core being initialized
@@ -3241,8 +3292,6 @@
static int __clk_core_init(struct clk_core *core)
{
int ret;
- struct clk_core *orphan;
- struct hlist_node *tmp2;
unsigned long rate;
if (!core)
@@ -3294,6 +3343,21 @@
goto out;
}
+ /*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic.
+ * Please consider other ways of solving initialization problems before
+ * using this callback, as its use is discouraged.
+ *
+ * If it exists, this callback should be called before any other
+ * callback of the clock.
+ */
+ if (core->ops->init)
+ core->ops->init(core->hw);
+
+
core->parent = __clk_init_parent(core);
/*
@@ -3319,17 +3383,6 @@
}
/*
- * optional platform-specific magic
- *
- * The .init callback is not used by any of the basic clock types, but
- * exists for weird hardware that must perform initialization magic.
- * Please consider other ways of solving initialization problems before
- * using this callback, as its use is discouraged.
- */
- if (core->ops->init)
- core->ops->init(core->hw);
-
- /*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
@@ -3345,14 +3398,11 @@
core->accuracy = 0;
/*
- * Set clk's phase.
+ * Set clk's phase, which clk_core_get_phase() caches for us.
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- else
- core->phase = 0;
+ clk_core_get_phase(core);
/*
* Set clk's duty cycle.
@@ -3382,39 +3432,29 @@
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
- }
-
- /*
- * walk the list of orphan clocks and reparent any that newly finds a
- * parent.
- */
- hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- struct clk_core *parent = __clk_init_parent(orphan);
-
- /*
- * We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
- * clock. This is important for CLK_IS_CRITICAL clocks, which
- * are enabled during init but might not have a parent yet.
- */
- if (parent) {
- /* update the clk tree topology */
- __clk_set_parent_before(orphan, parent);
- __clk_set_parent_after(orphan, parent, NULL);
- __clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
+ if (ret) {
+ clk_core_unprepare(core);
+ goto out;
}
}
+ clk_core_reparent_orphans_nolock();
+
+
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
+ if (ret)
+ hlist_del_init(&core->child_node);
+
clk_prepare_unlock();
if (!ret)
@@ -3702,6 +3742,28 @@
}
/**
+ * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
+ * @dev: Device to get device node of
+ *
+ * Return: device node pointer of @dev, or the device node pointer of
+ * @dev->parent if @dev doesn't have a device node, or NULL if neither
+ * @dev nor @dev->parent has a device node.
+ */
+static struct device_node *dev_or_parent_of_node(struct device *dev)
+{
+ struct device_node *np;
+
+ if (!dev)
+ return NULL;
+
+ np = dev_of_node(dev);
+ if (!np)
+ np = dev_of_node(dev->parent);
+
+ return np;
+}
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
@@ -3716,7 +3778,7 @@
*/
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
- return __clk_register(dev, dev_of_node(dev), hw);
+ return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);
@@ -3732,7 +3794,8 @@
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
- return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
+ hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
@@ -3879,6 +3942,7 @@
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
+ free_clk(clk);
unlock:
clk_prepare_unlock();
}
@@ -4087,20 +4151,19 @@
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
- break;
+ goto found;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
- if (cn->clk != clk) {
- cn = kzalloc(sizeof(*cn), GFP_KERNEL);
- if (!cn)
- goto out;
+ cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+ if (!cn)
+ goto out;
- cn->clk = clk;
- srcu_init_notifier_head(&cn->notifier_head);
+ cn->clk = clk;
+ srcu_init_notifier_head(&cn->notifier_head);
- list_add(&cn->node, &clk_notifier_list);
- }
+ list_add(&cn->node, &clk_notifier_list);
+found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->core->notifier_count++;
@@ -4125,32 +4188,28 @@
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
- struct clk_notifier *cn = NULL;
- int ret = -EINVAL;
+ struct clk_notifier *cn;
+ int ret = -ENOENT;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
- list_for_each_entry(cn, &clk_notifier_list, node)
- if (cn->clk == clk)
+ list_for_each_entry(cn, &clk_notifier_list, node) {
+ if (cn->clk == clk) {
+ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+ clk->core->notifier_count--;
+
+ /* XXX the notifier code should handle this better */
+ if (!cn->notifier_head.head) {
+ srcu_cleanup_notifier_head(&cn->notifier_head);
+ list_del(&cn->node);
+ kfree(cn);
+ }
break;
-
- if (cn->clk == clk) {
- ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
-
- clk->core->notifier_count--;
-
- /* XXX the notifier code should handle this better */
- if (!cn->notifier_head.head) {
- srcu_cleanup_notifier_head(&cn->notifier_head);
- list_del(&cn->node);
- kfree(cn);
}
-
- } else {
- ret = -ENOENT;
}
clk_prepare_unlock();
@@ -4160,6 +4219,13 @@
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF
+static void clk_core_reparent_orphans(void)
+{
+ clk_prepare_lock();
+ clk_core_reparent_orphans_nolock();
+ clk_prepare_unlock();
+}
+
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
@@ -4255,6 +4321,8 @@
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
@@ -4290,6 +4358,8 @@
mutex_unlock(&of_clk_mutex);
pr_debug("Added clk_hw provider from %pOF\n", np);
+ clk_core_reparent_orphans();
+
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);