Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a905796..25f11e9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -41,6 +41,7 @@
config ARM_ARMADA_8K_CPUFREQ
tristate "Armada 8K CPUFreq driver"
depends on ARCH_MVEBU && CPUFREQ_DT
+ select ARMADA_AP_CPU_CLK
help
This enables the CPUFreq driver support for Marvell
Armada8k SOCs.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d6f7df3..4195834 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -688,7 +688,8 @@
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
- if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+ if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
+ !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,
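For context (interpretation of the hunk above, not text from the patch): the added boot_cpu_data.x86 < 0x19 test keeps the old quirk that collapses a _PSD coordination domain to a single CPU away from AMD family 0x19 (Zen 3) and newer parts, whose firmware-provided _PSD tables are accurate. A minimal sketch of the resulting condition:

	/* Illustrative only: override the BIOS _PSD domain only on older
	 * AMD hardware-P-state CPUs (pre family 0x19) and only when the
	 * strict ACPI P-state checking mode is off. */
	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
	}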
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index aa0f06d..2de7fd1 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -25,6 +25,10 @@
#include "cpufreq-dt.h"
+/* Clk register set */
+#define ARMADA_37XX_CLK_TBG_SEL 0
+#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22
+
/* Power management in North Bridge register set */
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
@@ -69,6 +73,8 @@
#define LOAD_LEVEL_NR 4
#define MIN_VOLT_MV 1000
+#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
+#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
/* AVS value for the corresponding voltage (in mV) */
static int avs_map[] = {
@@ -96,7 +102,11 @@
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ /*
+ * The cpufreq scaling for the 1.2 GHz variant of the SoC is currently
+ * unstable because we do not know how to configure it properly.
+ */
+ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
@@ -120,10 +130,15 @@
* will be configured then the DVFS will be enabled.
*/
static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
- struct clk *clk, u8 *divider)
+ struct regmap *clk_base, u8 *divider)
{
+ u32 cpu_tbg_sel;
int load_lvl;
- struct clk *parent;
+
+ /* Determine which TBG clock the CPU is connected to */
+ regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
+ cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
+ cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
unsigned int reg, mask, val, offset = 0;
@@ -142,6 +157,11 @@
mask = (ARMADA_37XX_NB_CLK_SEL_MASK
<< ARMADA_37XX_NB_CLK_SEL_OFF);
+ /* Set TBG index, for all levels we use the same TBG */
+ val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
+ mask = (ARMADA_37XX_NB_TBG_SEL_MASK
+ << ARMADA_37XX_NB_TBG_SEL_OFF);
+
/*
* Set cpu divider based on the pre-computed array in
* order to have balanced step.
@@ -160,14 +180,6 @@
regmap_update_bits(base, reg, mask, val);
}
-
- /*
- * Set cpu clock source, for all the level we keep the same
- * clock source that the one already configured. For this one
- * we need to use the clock framework
- */
- parent = clk_get_parent(clk);
- clk_set_parent(clk, parent);
}
/*
@@ -202,6 +214,8 @@
* - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
* This function calculates L1 & L2 & L3 AVS values dynamically based
* on L0 voltage and fill all AVS values to the AVS value table.
+ * When the base CPU frequency is 1000 or 1200 MHz, an additional
+ * minimum AVS value applies to load L1.
*/
static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
@@ -233,6 +247,19 @@
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
dvfs->avs[load_level] = avs_min;
+ /*
+ * Set the avs values for load L0 and L1 when the base CPU frequency
+ * is 1000/1200 MHz to their typical initial values according to
+ * the Armada 3700 Hardware Specifications.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+ dvfs->avs[0] = dvfs->avs[1] = avs_min;
+ }
+
return;
}
@@ -252,6 +279,26 @@
target_vm = avs_map[l0_vdd_min] - 150;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+ * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
+ * otherwise the CPU gets stuck when switching from load L1 to load L0.
+ * Also ensure that avs value for load L1 is not higher than for L0.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ u32 avs_min_l1;
+
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+
+ if (avs_min_l1 > dvfs->avs[0])
+ avs_min_l1 = dvfs->avs[0];
+
+ if (dvfs->avs[1] < avs_min_l1)
+ dvfs->avs[1] = avs_min_l1;
+ }
}
static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
@@ -358,11 +405,16 @@
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency, base_frequency;
- struct regmap *nb_pm_base, *avs_base;
+ struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk, *parent;
+ nb_clk_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
+ if (IS_ERR(nb_clk_base))
+ return -ENODEV;
+
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -421,7 +473,7 @@
return -EINVAL;
}
- dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
if (!dvfs) {
clk_put(clk);
return -EINVAL;
@@ -439,7 +491,7 @@
armada37xx_cpufreq_avs_configure(avs_base, dvfs);
armada37xx_cpufreq_avs_setup(avs_base, dvfs);
- armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
@@ -456,6 +508,7 @@
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+ memset(&pdata, 0, sizeof(pdata));
pdata.suspend = armada37xx_cpufreq_suspend;
pdata.resume = armada37xx_cpufreq_resume;
@@ -472,7 +525,7 @@
remove_opp:
/* clean-up the already added opp before leaving */
while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
- freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = base_frequency / dvfs->divider[load_lvl];
dev_pm_opp_remove(cpu_dev, freq);
}
@@ -483,6 +536,12 @@
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+ { .compatible = "marvell,armada-3700-nb-pm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");
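Aside (illustrative, not part of the patch): the AVS hunks above put a floor under the load-L1 voltage on the 1.0/1.2 GHz parts, since letting L1 drop too low can hang the CPU on the L1 -> L0 transition. A minimal sketch of how that floor is applied, using only the constants and helpers defined in this file:

	u32 avs_min_l1;

	/* 1.2 GHz parts need roughly 1155 mV at L1, 1.0 GHz parts 1108 mV. */
	if (dvfs->cpu_freq_max >= 1200*1000*1000)
		avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
	else
		avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);

	/* Never raise L1 above L0, and never let it drop below the floor. */
	if (avs_min_l1 > dvfs->avs[0])
		avs_min_l1 = dvfs->avs[0];
	if (dvfs->avs[1] < avs_min_l1)
		dvfs->avs[1] = avs_min_l1;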
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 39e34f5..b0fc5e8 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -204,6 +204,12 @@
}
module_exit(armada_8k_cpufreq_exit);
+static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
+ { .compatible = "marvell,ap806-cpu-clock" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
+
MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
MODULE_DESCRIPTION("Armada 8K cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 77b0e5d..a3c82f5 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -566,6 +566,16 @@
return ret;
}
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -701,21 +711,21 @@
brcm_avs_driver.driver_data = pdev;
- return cpufreq_register_driver(&brcm_avs_driver);
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
- if (ret)
- return ret;
+ WARN_ON(ret);
- priv = platform_get_drvdata(pdev);
- iounmap(priv->base);
- iounmap(priv->avs_intr_base);
+ brcm_avs_prepare_uninit(pdev);
return 0;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8d8da76..8910fd1 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -217,7 +217,7 @@
return ret;
}
-static int cppc_verify_policy(struct cpufreq_policy *policy)
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
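Note: this and the ->verify() changes in the other drivers below follow the core change in cpufreq.c and freq_table.c, where verification now operates on the reduced struct cpufreq_policy_data (cpu, cpuinfo, freq_table, min, max) rather than on the full policy. A converted callback keeps the same shape; a minimal sketch with a hypothetical driver name:

	/* Hypothetical example of a ->verify() callback after the
	 * cpufreq_policy_data conversion; it only clamps the requested
	 * min/max into the CPU's hardware limits. */
	static int example_verify(struct cpufreq_policy_data *policy)
	{
		cpufreq_verify_within_cpu_limits(policy);
		return 0;
	}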
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bca8d1f..1200842 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@
static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "arm,vexpress", },
+
{ .compatible = "calxeda,highbank", },
{ .compatible = "calxeda,ecx-2000", },
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index cd53272..f7a7bcf 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -291,7 +291,7 @@
* nforce2_verify - verifies a new CPUFreq policy
* @policy: new policy
*/
-static int nforce2_verify(struct cpufreq_policy *policy)
+static int nforce2_verify(struct cpufreq_policy_data *policy)
{
unsigned int fsb_pol_max;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index bc19d6c..c4e9283 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,6 +74,9 @@
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -613,55 +616,51 @@
return NULL;
}
-static int cpufreq_parse_policy(char *str_governor,
- struct cpufreq_policy *policy)
-{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- return -EINVAL;
-}
-
-/**
- * cpufreq_parse_governor - parse a governor string only for has_target()
- */
-static int cpufreq_parse_governor(char *str_governor,
- struct cpufreq_policy *policy)
+static struct cpufreq_governor *get_governor(const char *str_governor)
{
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
-
t = find_governor(str_governor);
- if (!t) {
- int ret;
+ if (!t)
+ goto unlock;
- mutex_unlock(&cpufreq_governor_mutex);
-
- ret = request_module("cpufreq_%s", str_governor);
- if (ret)
- return -EINVAL;
-
- mutex_lock(&cpufreq_governor_mutex);
-
- t = find_governor(str_governor);
- }
- if (t && !try_module_get(t->owner))
+ if (!try_module_get(t->owner))
t = NULL;
+unlock:
mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
+ return t;
+}
- return -EINVAL;
+static unsigned int cpufreq_parse_policy(char *str_governor)
+{
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_PERFORMANCE;
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_POWERSAVE;
+
+ return CPUFREQ_POLICY_UNKNOWN;
+}
+
+/**
+ * cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
+ */
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ t = get_governor(str_governor);
+ if (t)
+ return t;
+
+ if (request_module("cpufreq_%s", str_governor))
+ return NULL;
+
+ return get_governor(str_governor);
}
/**
@@ -762,29 +761,34 @@
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
+ char str_governor[16];
int ret;
- char str_governor[16];
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
if (cpufreq_driver->setpolicy) {
- if (cpufreq_parse_policy(str_governor, &new_policy))
+ unsigned int new_pol;
+
+ new_pol = cpufreq_parse_policy(str_governor);
+ if (!new_pol)
return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, NULL, new_pol);
} else {
- if (cpufreq_parse_governor(str_governor, &new_policy))
+ struct cpufreq_governor *new_gov;
+
+ new_gov = cpufreq_parse_governor(str_governor);
+ if (!new_gov)
return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, new_gov,
+ CPUFREQ_POLICY_UNKNOWN);
+
+ module_put(new_gov->owner);
}
- ret = cpufreq_set_policy(policy, &new_policy);
-
- if (new_policy.governor)
- module_put(new_policy.governor->owner);
-
return ret ? ret : count;
}
@@ -810,12 +814,14 @@
goto out;
}
+ mutex_lock(&cpufreq_governor_mutex);
for_each_governor(t) {
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
- goto out;
+ break;
i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
+ mutex_unlock(&cpufreq_governor_mutex);
out:
i += sprintf(&buf[i], "\n");
return i;
@@ -1050,40 +1056,47 @@
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL, *def_gov = NULL;
- struct cpufreq_policy new_policy;
-
- memcpy(&new_policy, policy, sizeof(*policy));
-
- def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *gov = NULL;
+ unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
+ int ret;
if (has_target()) {
- /*
- * Update governor of new_policy to the governor used before
- * hotplug
- */
- gov = find_governor(policy->last_governor);
+ /* Update policy governor to the one used before hotplug. */
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, policy->cpu);
- } else {
- if (!def_gov)
- return -ENODATA;
+ policy->governor->name, policy->cpu);
+ } else if (def_gov) {
gov = def_gov;
+ __module_get(gov->owner);
+ } else {
+ return -ENODATA;
}
- new_policy.governor = gov;
} else {
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
- new_policy.policy = policy->last_policy;
- } else {
- if (!def_gov)
- return -ENODATA;
- cpufreq_parse_policy(def_gov->name, &new_policy);
+ pol = policy->last_policy;
+ } else if (def_gov) {
+ pol = cpufreq_parse_policy(def_gov->name);
+ /*
+ * In case the default governor is neither "performance"
+ * nor "powersave", fall back to the initial policy
+ * value set by the driver.
+ */
+ if (pol == CPUFREQ_POLICY_UNKNOWN)
+ pol = policy->policy;
}
+ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+ pol != CPUFREQ_POLICY_POWERSAVE)
+ return -ENODATA;
}
- return cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, gov, pol);
+ if (gov)
+ module_put(gov->owner);
+
+ return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1111,13 +1124,10 @@
void refresh_frequency_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy new_policy;
-
if (!policy_is_inactive(policy)) {
- new_policy = *policy;
pr_debug("updating policy for CPU %u\n", policy->cpu);
- cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, policy->governor, policy->policy);
}
}
EXPORT_SYMBOL(refresh_frequency_limits);
@@ -1351,9 +1361,14 @@
goto out_free_policy;
}
+ /*
+ * The initialization has succeeded and the policy is online.
+ * If there is a problem with its frequency table, take it
+ * offline and drop it.
+ */
ret = cpufreq_table_validate_and_sort(policy);
if (ret)
- goto out_exit_policy;
+ goto out_offline_policy;
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -1497,6 +1512,10 @@
up_write(&policy->rwsem);
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
@@ -2361,43 +2380,46 @@
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
- * @new_policy: New policy data.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
*
- * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
- * min and max parameters of @new_policy to @policy and either invoke the
- * driver's ->setpolicy() callback (if present) or carry out a governor update
- * for @policy. That is, run the current governor's ->limits() callback (if the
- * governor field in @new_policy points to the same object as the one in
- * @policy) or replace the governor for @policy with the new one stored in
- * @new_policy.
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limits
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy. That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
*
* The cpuinfo part of @policy is not updated by this function.
*/
-int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol)
{
+ struct cpufreq_policy_data new_data;
struct cpufreq_governor *old_gov;
int ret;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
- new_policy->cpu, new_policy->min, new_policy->max);
-
- memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
-
+ memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+ new_data.freq_table = policy->freq_table;
+ new_data.cpu = policy->cpu;
/*
* PM QoS framework collects all the requests from users and provide us
* the final aggregated value here.
*/
- new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
- new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+ new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_data.cpu, new_data.min, new_data.max);
/* verify the cpu speed can be set within this limit */
- ret = cpufreq_driver->verify(new_policy);
+ ret = cpufreq_driver->verify(&new_data);
if (ret)
return ret;
- policy->min = new_policy->min;
- policy->max = new_policy->max;
+ policy->min = new_data.min;
+ policy->max = new_data.max;
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
@@ -2406,12 +2428,12 @@
policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
- policy->policy = new_policy->policy;
+ policy->policy = new_pol;
pr_debug("setting range\n");
return cpufreq_driver->setpolicy(policy);
}
- if (new_policy->governor == policy->governor) {
+ if (new_gov == policy->governor) {
pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
@@ -2428,7 +2450,7 @@
}
/* start new governor */
- policy->governor = new_policy->governor;
+ policy->governor = new_gov;
ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
@@ -2506,26 +2528,27 @@
static int cpufreq_boost_set_sw(int state)
{
struct cpufreq_policy *policy;
- int ret = -EINVAL;
for_each_active_policy(policy) {
+ int ret;
+
if (!policy->freq_table)
- continue;
+ return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy,
policy->freq_table);
if (ret) {
pr_err("%s: Policy frequency update failed\n",
__func__);
- break;
+ return ret;
}
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
int cpufreq_boost_trigger_state(int state)
@@ -2634,6 +2657,13 @@
if (cpufreq_disabled())
return -ENODEV;
+ /*
+ * The cpufreq core depends heavily on the availability of device
+ * structure, make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
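Note: with the refactor above, sysfs writes to scaling_governor no longer build a scratch struct cpufreq_policy; they resolve either a policy value or a governor object and hand it to the now-static cpufreq_set_policy(). The call pattern, as it appears in the hunks above (the module reference taken by get_governor() must be dropped by the caller):

	new_gov = cpufreq_parse_governor(str_governor);
	if (!new_gov)
		return -EINVAL;

	ret = cpufreq_set_policy(policy, new_gov, CPUFREQ_POLICY_UNKNOWN);

	module_put(new_gov->owner);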
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index ded427e..e117b00 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -60,7 +60,7 @@
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
{
if (!policy->freq_table)
return -ENODEV;
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index e97b573..75b3ef7 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -328,7 +328,7 @@
* for the hardware supported by the driver.
*/
-static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
{
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 5a7f6da..ac57cdd 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -101,6 +101,13 @@
}
module_init(hb_cpufreq_driver_init);
+static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = {
+ { .compatible = "calxeda,highbank" },
+ { .compatible = "calxeda,ecx-2000" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match);
+
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 35db14c..85a6efd 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -44,19 +44,19 @@
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
- * Early samples without fuses written report "0 0" which means
- * consumer segment and minimum speed grading.
- *
- * According to datasheet minimum speed grading is not supported for
- * consumer parts so clamp to 1 to avoid warning for "no OPPs"
+ * Early samples without fuses written report "0 0" which may NOT
+ * match any OPP defined in DT. So clamp to minimum OPP defined in
+ * DT to avoid warning for "no OPPs".
*
* Applies to i.MX8M series SoCs.
*/
- if (mkt_segment == 0 && speed_grade == 0 && (
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mq")))
- speed_grade = 1;
+ if (mkt_segment == 0 && speed_grade == 0) {
+ if (of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq"))
+ speed_grade = 1;
+ if (of_machine_is_compatible("fsl,imx8mn"))
+ speed_grade = 0xb;
+ }
supported_hw[0] = BIT(speed_grade);
supported_hw[1] = BIT(mkt_segment);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 648a09a..edef339 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -281,6 +281,9 @@
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,imx6ull-ocotp");
+ if (!np)
return -ENOENT;
base = of_iomap(np, 0);
@@ -378,23 +381,24 @@
goto put_reg;
}
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto put_node;
+ goto out_free_opp;
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- goto put_node;
+ goto out_free_opp;
}
} else {
imx6q_opp_check_speed_grading(cpu_dev);
}
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ab3170..88fe803 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -649,11 +649,12 @@
mutex_lock(&intel_pstate_limits_lock);
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
- u64 value;
-
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
- if (ret)
- goto return_pref;
+ /*
+ * Use the cached HWP Request MSR value, because the register
+ * itself may be updated by intel_pstate_hwp_boost_up() or
+ * intel_pstate_hwp_boost_down() at any time.
+ */
+ u64 value = READ_ONCE(cpu_data->hwp_req_cached);
value &= ~GENMASK_ULL(31, 24);
@@ -661,13 +662,18 @@
epp = epp_values[pref_index - 1];
value |= (u64)epp << 24;
+ /*
+ * The only other updater of hwp_req_cached in the active mode,
+ * intel_pstate_hwp_set(), is called under the same lock as this
+ * function, so it cannot run in parallel with the update below.
+ */
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
} else {
if (epp == -EINVAL)
epp = (pref_index - 1) << 2;
ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
-return_pref:
mutex_unlock(&intel_pstate_limits_lock);
return ret;
@@ -756,7 +762,7 @@
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
- if (global.no_turbo)
+ if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
*current_max = HWP_HIGHEST_PERF(cap);
@@ -1058,7 +1064,7 @@
update_turbo_state();
if (global.turbo_disabled) {
- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
@@ -1560,20 +1566,22 @@
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ cpu->pstate.turbo_pstate = phy_max;
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
}
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2036,8 +2044,9 @@
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
-static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ unsigned int policy_min,
+ unsigned int policy_max)
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
@@ -2056,18 +2065,17 @@
turbo_max = cpu->pstate.turbo_pstate;
}
- max_policy_perf = max_state * policy->max / max_freq;
- if (policy->max == policy->min) {
+ max_policy_perf = max_state * policy_max / max_freq;
+ if (policy_max == policy_min) {
min_policy_perf = max_policy_perf;
} else {
- min_policy_perf = max_state * policy->min / max_freq;
+ min_policy_perf = max_state * policy_min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
- policy->cpu, max_state,
- min_policy_perf, max_policy_perf);
+ cpu->cpu, max_state, min_policy_perf, max_policy_perf);
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
@@ -2081,7 +2089,7 @@
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
- pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+ pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
global_min, global_max);
cpu->min_perf_ratio = max(min_policy_perf, global_min);
@@ -2094,7 +2102,7 @@
cpu->max_perf_ratio);
}
- pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+ pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
@@ -2114,7 +2122,7 @@
mutex_lock(&intel_pstate_limits_lock);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -2143,8 +2151,8 @@
return 0;
}
-static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
- struct cpudata *cpu)
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
{
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -2155,7 +2163,7 @@
}
}
-static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2163,11 +2171,7 @@
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
- policy->policy != CPUFREQ_POLICY_PERFORMANCE)
- return -EINVAL;
-
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
return 0;
}
@@ -2268,7 +2272,7 @@
.name = "intel_pstate",
};
-static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
@@ -2276,9 +2280,9 @@
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
- intel_pstate_adjust_policy_max(policy, cpu);
+ intel_pstate_adjust_policy_max(cpu, policy);
- intel_pstate_update_perf_limits(policy, cpu);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
}
@@ -2530,9 +2534,15 @@
{
int ret;
- if (size == 3 && !strncmp(buf, "off", size))
- return intel_pstate_driver ?
- intel_pstate_unregister_driver() : -EINVAL;
+ if (size == 3 && !strncmp(buf, "off", size)) {
+ if (!intel_pstate_driver)
+ return -EINVAL;
+
+ if (hwp_active)
+ return -EBUSY;
+
+ return intel_pstate_unregister_driver();
+ }
if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
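Note: the EPP update path above now works on the cached copy of MSR_HWP_REQUEST and writes the cache before the MSR, so intel_pstate_hwp_set() and the boost paths see a consistent value. The read-modify-write, condensed from the hunks above ('cpu_data' and 'epp' as in the patch):

	u64 value = READ_ONCE(cpu_data->hwp_req_cached);

	value &= ~GENMASK_ULL(31, 24);	/* clear the EPP field (bits 31:24) */
	value |= (u64)epp << 24;	/* insert the new preference */

	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);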
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 64b8689..0b08be8 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -122,7 +122,7 @@
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
-static int longrun_verify_policy(struct cpufreq_policy *policy)
+static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
if (!policy)
return -EINVAL;
@@ -130,10 +130,6 @@
policy->cpu = 0;
cpufreq_verify_within_cpu_limits(policy);
- if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
- (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
- return -EINVAL;
-
return 0;
}
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index 0ea8877..86f6125 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -216,6 +216,7 @@
module_platform_driver(ls1x_cpufreq_platdrv);
+MODULE_ALIAS("platform:ls1x-cpufreq");
MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 0c98dd0..927ebc5 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -540,6 +540,7 @@
{ }
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
static int __init mtk_cpufreq_driver_init(void)
{
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index fdc767f..f902730 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -109,7 +109,7 @@
static struct pcc_cpu __percpu *pcc_cpu_info;
-static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2db2f17..1b2ec3b 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6061850..c636c9b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -36,6 +36,7 @@
#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
+#define MAX_NR_CHIPS 32
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -884,12 +885,15 @@
unsigned long action, void *unused)
{
int cpu;
- struct cpufreq_policy cpu_policy;
+ struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
- cpufreq_get_policy(&cpu_policy, cpu);
- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ continue;
+ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+ cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;
@@ -902,6 +906,7 @@
void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
+ struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
@@ -916,12 +921,14 @@
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
- struct cpufreq_policy policy;
- cpufreq_get_policy(&policy, cpu);
- index = cpufreq_table_find_index_c(&policy, policy.cur);
- powernv_cpufreq_target_index(&policy, index);
- cpumask_andnot(&mask, &mask, policy.cpus);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ index = cpufreq_table_find_index_c(policy, policy->cur);
+ powernv_cpufreq_target_index(policy, index);
+ cpumask_andnot(&mask, &mask, policy->cpus);
+ cpufreq_cpu_put(policy);
}
out:
put_online_cpus();
@@ -1041,9 +1048,22 @@
static int init_chip_info(void)
{
- unsigned int chip[256];
+ unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
+ cpumask_t *chip_cpu_mask;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* Allocate a chip cpu mask large enough to fit mask for all chips */
+ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
+ if (!chip_cpu_mask) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
@@ -1052,25 +1072,38 @@
prev_chip_id = id;
chip[nr_chips++] = id;
}
+ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
+ if (!chips) {
+ ret = -ENOMEM;
+ goto out_free_chip_cpu_mask;
+ }
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
for_each_cpu(cpu, &chips[i].mask)
per_cpu(chip_info, cpu) = &chips[i];
}
- return 0;
+out_free_chip_cpu_mask:
+ kfree(chip_cpu_mask);
+free_and_return:
+ kfree(chip);
+ return ret;
}
static inline void clean_chip_info(void)
{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
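Note: init_chip_info() above now sizes its scratch buffers from num_possible_cpus() and MAX_NR_CHIPS on the heap instead of using a fixed 256-entry on-stack array, and builds each chip's CPU mask while walking the possible CPUs; clean_chip_info() flushes any pending throttle work before freeing. A condensed sketch of the allocation and error unwinding from the hunks above:

	chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* One cpumask per chip, freed again once chips[] is populated. */
	chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
	if (!chip_cpu_mask) {
		ret = -ENOMEM;
		goto free_and_return;	/* frees 'chip' */
	}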
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index f0d2d50..1e77d19 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -305,6 +305,7 @@
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{},
};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
/*
* Since the driver depends on smem and nvmem drivers, which may
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2b51e07..b341ffb 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -239,6 +239,7 @@
};
module_platform_driver(scpi_cpufreq_platdrv);
+MODULE_ALIAS("platform:scpi-cpufreq");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 5096c0a..0ac265d 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,7 +87,7 @@
return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
}
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
struct cpufreq_frequency_table *freq_table;
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 8f16bbb..7ade407 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -141,7 +141,8 @@
static const struct reg_field *sti_cpufreq_match(void)
{
if (of_machine_is_compatible("st,stih407") ||
- of_machine_is_compatible("st,stih410"))
+ of_machine_is_compatible("st,stih410") ||
+ of_machine_is_compatible("st,stih418"))
return sti_stih407_dvfs_regfields;
return NULL;
@@ -258,7 +259,8 @@
int ret;
if ((!of_machine_is_compatible("st,stih407")) &&
- (!of_machine_is_compatible("st,stih410")))
+ (!of_machine_is_compatible("st,stih410")) &&
+ (!of_machine_is_compatible("st,stih418")))
return -ENODEV;
ddata.cpu = get_cpu_device(0);
@@ -290,6 +292,13 @@
}
module_init(sti_cpufreq_init);
+static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
+ { .compatible = "st,stih407" },
+ { .compatible = "st,stih410" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
+
MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>");
MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index eca32e4..2deed8d 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -25,7 +25,7 @@
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
/**
- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
* @versions: Set to the value parsed from efuse
*
* Returns 0 if success.
@@ -69,21 +69,16 @@
return PTR_ERR(speedbin);
efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
- switch (efuse_value) {
- case 0b0001:
- *versions = 1;
- break;
- case 0b0011:
- *versions = 2;
- break;
- default:
- /*
- * For other situations, we treat it as bin0.
- * This vf table can be run for any good cpu.
- */
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ *versions = efuse_value - 1;
+ else
*versions = 0;
- break;
- }
kfree(speedbin);
return 0;
@@ -172,6 +167,7 @@
{ .compatible = "allwinner,sun50i-h6" },
{}
};
+MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
static const struct of_device_id *sun50i_cpufreq_match_node(void)
{
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 707dbc1..98d3921 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -22,7 +22,7 @@
/* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway.
*/
-static int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
{
if (policy->cpu)
return -EINVAL;