Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7e48eb5..88727b7 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,10 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
menu "CPU Idle"
config CPU_IDLE
bool "CPU idle PM support"
default y if ACPI || PPC_PSERIES
select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
- select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
+ select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
@@ -23,6 +24,26 @@
config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
+config CPU_IDLE_GOV_TEO
+ bool "Timer events oriented (TEO) governor (for tickless systems)"
+ help
+ This governor implements a simplified idle state selection method
+ focused on timer events and does not do any interactivity boosting.
+
+ Some workloads benefit from using it and it generally should be safe
+ to use. Say Y here if you are not happy with the alternatives.
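+
+	  It can also be selected explicitly at boot time via the
+	  cpuidle.governor=teo kernel command line option introduced by
+	  this update.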
+
+config CPU_IDLE_GOV_HALTPOLL
+ bool "Haltpoll governor (for virtualized systems)"
+ depends on KVM_GUEST
+ help
+	  This governor implements haltpoll idle state selection, to be
+	  used in conjunction with the haltpoll cpuidle driver, allowing
+	  for polling for a certain amount of time before entering an
+	  idle state.
+
+ Some virtualized workloads benefit from using it.
+
config DT_IDLE_STATES
bool
@@ -41,6 +62,15 @@
source "drivers/cpuidle/Kconfig.powerpc"
endmenu
+config HALTPOLL_CPUIDLE
+ tristate "Halt poll cpuidle driver"
+ depends on X86 && KVM_GUEST
+ default y
+ help
+	  This option enables the halt poll cpuidle driver, which allows
+	  polling before halting in the guest (more efficient than polling
+	  in the host via halt_poll_ns for some scenarios).
+
endif
config ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index f521448..d853047 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# ARM CPU Idle drivers
#
@@ -12,6 +13,16 @@
initialized by calling the CPU operations init idle hook
provided by architecture code.
+config ARM_PSCI_CPUIDLE
+ bool "PSCI CPU idle Driver"
+ depends on ARM_PSCI_FW
+ select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+	  Select this to enable the PSCI firmware based CPUidle driver for ARM.
+ It provides an idle driver that is capable of detecting and
+ managing idle states through the PSCI firmware interface.
+
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 512ee37..c3c011a 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# MIPS CPU Idle Drivers
#
diff --git a/drivers/cpuidle/Kconfig.powerpc b/drivers/cpuidle/Kconfig.powerpc
index 66c3a09..a797a02 100644
--- a/drivers/cpuidle/Kconfig.powerpc
+++ b/drivers/cpuidle/Kconfig.powerpc
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# POWERPC CPU Idle Drivers
#
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9d7176c..ee70d5c 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
+obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o
##################################################################################
# ARM SoC drivers
@@ -20,6 +21,7 @@
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 147f38e..b607278 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* coupled.c - helper functions to enter the same idle state on multiple cpus
*
* Copyright (c) 2011 Google, Inc.
*
* Author: Colin Cross <ccross@android.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index df564d7..9e5156d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM/ARM64 generic CPU idle driver.
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "CPUidle arm: " fmt
@@ -18,7 +15,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
-#include <linux/topology.h>
#include <asm/cpuidle.h>
@@ -82,7 +78,6 @@
{
int ret;
struct cpuidle_driver *drv;
- struct cpuidle_device *dev;
drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -110,42 +105,27 @@
ret = arm_cpuidle_init(cpu);
/*
- * Allow the initialization to continue for other CPUs, if the reported
- * failure is a HW misconfiguration/breakage (-ENXIO).
+ * Allow the initialization to continue for other CPUs, if the
+ * reported failure is a HW misconfiguration/breakage (-ENXIO).
+ *
+	 * Some platforms do not support idle operations
+	 * (arm_cpuidle_init() returning -EOPNOTSUPP); we should not
+	 * flag this case as an error, since it is a valid
+	 * configuration.
*/
if (ret) {
- pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ if (ret != -EOPNOTSUPP)
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
ret = ret == -ENXIO ? 0 : ret;
goto out_kfree_drv;
}
- ret = cpuidle_register_driver(drv);
- if (ret) {
- if (ret != -EBUSY)
- pr_err("Failed to register cpuidle driver\n");
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
goto out_kfree_drv;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto out_unregister_drv;
- }
- dev->cpu = cpu;
-
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- goto out_kfree_dev;
- }
return 0;
-out_kfree_dev:
- kfree(dev);
-out_unregister_drv:
- cpuidle_unregister_driver(drv);
out_kfree_drv:
kfree(drv);
return ret;
@@ -176,9 +156,7 @@
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
- cpuidle_unregister_device(dev);
- cpuidle_unregister_driver(drv);
- kfree(dev);
+ cpuidle_unregister(drv);
kfree(drv);
}
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index db2ede5..7f8ddc0 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 ARM/Linaro
*
@@ -5,10 +6,6 @@
* Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
* Nicolas Pitre <nicolas.pitre@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
* Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
*/
@@ -167,6 +164,7 @@
{
int ret;
struct device_node *root = of_find_node_by_path("/");
+ const struct of_device_id *match_id;
if (!root)
return -ENODEV;
@@ -174,7 +172,11 @@
/*
* Initialize the driver just for a compliant set of machines
*/
- if (!of_match_node(compatible_machine_match, root))
+ match_id = of_match_node(compatible_machine_match, root);
+
+ of_node_put(root);
+
+ if (!match_id)
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index ea9728f..b17d9a8 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Calxeda, Inc.
*
@@ -5,18 +6,6 @@
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2012 Linaro Ltd.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Maintainer: Rob Herring <rob.herring@calxeda.com>
*/
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c
index 66a9f23..6e36740 100644
--- a/drivers/cpuidle/cpuidle-clps711x.c
+++ b/drivers/cpuidle/cpuidle-clps711x.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CLPS711X CPU idle driver
*
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/cpuidle.h>
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index dac8ff6..dff0ff4 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <paul.burton@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/cpu_pm.h>
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 0171a6e..b2b5666 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -5,10 +6,6 @@
* Coupled cpuidle support based on the work of:
* Colin Cross <ccross@android.com>
* Daniel Lezcano <daniel.lezcano@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/cpuidle.h>
@@ -84,7 +81,7 @@
[1] = {
.enter = exynos_enter_lowpower,
.exit_latency = 300,
- .target_residency = 100000,
+ .target_residency = 10000,
.name = "C1",
.desc = "ARM power down",
},
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
new file mode 100644
index 0000000..b0ce9bc
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cpuidle driver for haltpoll governor.
+ *
+ * Copyright 2019 Red Hat, Inc. and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Authors: Marcelo Tosatti <mtosatti@redhat.com>
+ */
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <linux/sched/idle.h>
+#include <linux/kvm_para.h>
+#include <linux/cpuidle_haltpoll.h>
+
+static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
+static enum cpuhp_state haltpoll_hp_state;
+
+static int default_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
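+	/*
+	 * If a wakeup became pending while the polling flag was set,
+	 * skip the halt and report the state as entered.
+	 */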
+ if (current_clr_polling_and_test()) {
+ local_irq_enable();
+ return index;
+ }
+ default_idle();
+ return index;
+}
+
+static struct cpuidle_driver haltpoll_driver = {
+ .name = "haltpoll",
+ .governor = "haltpoll",
+ .states = {
+ { /* entry 0 is for polling */ },
+ {
+ .enter = default_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = -1,
+ .name = "haltpoll idle",
+ .desc = "default architecture idle",
+ },
+ },
+ .safe_state_index = 0,
+ .state_count = 2,
+};
+
+static int haltpoll_cpu_online(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
+ if (!dev->registered) {
+ dev->cpu = cpu;
+ if (cpuidle_register_device(dev)) {
+ pr_notice("cpuidle_register_device %d failed!\n", cpu);
+ return -EIO;
+ }
+ arch_haltpoll_enable(cpu);
+ }
+
+ return 0;
+}
+
+static int haltpoll_cpu_offline(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
+ if (dev->registered) {
+ arch_haltpoll_disable(cpu);
+ cpuidle_unregister_device(dev);
+ }
+
+ return 0;
+}
+
+static void haltpoll_uninit(void)
+{
+ if (haltpoll_hp_state)
+ cpuhp_remove_state(haltpoll_hp_state);
+ cpuidle_unregister_driver(&haltpoll_driver);
+
+ free_percpu(haltpoll_cpuidle_devices);
+ haltpoll_cpuidle_devices = NULL;
+}
+
+static int __init haltpoll_init(void)
+{
+ int ret;
+ struct cpuidle_driver *drv = &haltpoll_driver;
+
+ /* Do not load haltpoll if idle= is passed */
+ if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ cpuidle_poll_state_init(drv);
+
+ if (!kvm_para_available() ||
+ !kvm_para_has_hint(KVM_HINTS_REALTIME))
+ return -ENODEV;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret < 0)
+ return ret;
+
+ haltpoll_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (haltpoll_cpuidle_devices == NULL) {
+ cpuidle_unregister_driver(drv);
+ return -ENOMEM;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpuidle/haltpoll:online",
+ haltpoll_cpu_online, haltpoll_cpu_offline);
+ if (ret < 0) {
+ haltpoll_uninit();
+ } else {
+ haltpoll_hp_state = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void __exit haltpoll_exit(void)
+{
+ haltpoll_uninit();
+}
+
+module_init(haltpoll_init);
+module_exit(haltpoll_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>");
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
new file mode 100644
index 0000000..f3c1a23
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PSCI CPU idle driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+
+#include <asm/cpuidle.h>
+
+#include "dt_idle_states.h"
+
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ u32 *state = __this_cpu_read(psci_power_state);
+
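+	/*
+	 * Idle state 0 is plain WFI and takes no PSCI parameter, so the
+	 * DT-provided suspend parameters are indexed from idx - 1.
+	 */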
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ idx, state[idx - 1]);
+}
+
+static struct cpuidle_driver psci_idle_driver __initdata = {
+ .name = "psci_idle",
+ .owner = THIS_MODULE,
+ /*
+	 * PSCI idle states rely on architectural WFI to
+ * be represented as state index 0.
+ */
+ .states[0] = {
+ .enter = psci_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ }
+};
+
+static const struct of_device_id psci_idle_state_match[] __initconst = {
+ { .compatible = "arm,idle-state",
+ .data = psci_enter_idle_state },
+ { },
+};
+
+static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!psci_power_state_is_valid(*state)) {
+ pr_warn("Invalid PSCI power state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+ int i, ret = 0, count = 0;
+ u32 *psci_states;
+ struct device_node *state_node;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+ of_node_put(state_node);
+
+ if (ret)
+ goto free_mem;
+
+ pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
+ }
+
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
+static __init int psci_cpu_init_idle(unsigned int cpu)
+{
+ struct device_node *cpu_node;
+ int ret;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static int __init psci_idle_init_cpu(int cpu)
+{
+ struct cpuidle_driver *drv;
+ struct device_node *cpu_node;
+ const char *enable_method;
+ int ret = 0;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ /*
+ * Check whether the enable-method for the cpu is PSCI, fail
+ * if it is not.
+ */
+ enable_method = of_get_property(cpu_node, "enable-method", NULL);
+ if (!enable_method || (strcmp(enable_method, "psci")))
+ ret = -ENODEV;
+
+ of_node_put(cpu_node);
+ if (ret)
+ return ret;
+
+ drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1, since
+ * by default idle state 0 is the quiescent state reached
+ * by the cpu by executing the wfi instruction.
+ *
+ * If no DT idle states are detected (ret == 0) let the driver
+ * initialization fail accordingly since there is no reason to
+ * initialize the idle driver if only wfi is supported, the
+	 * default architectural back-end already executes wfi
+ * on idle entry.
+ */
+ ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Initialize PSCI idle states.
+ */
+ ret = psci_cpu_init_idle(cpu);
+ if (ret) {
+ pr_err("CPU %d failed to PSCI idle\n", cpu);
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto out_kfree_drv;
+
+ return 0;
+
+out_kfree_drv:
+ kfree(drv);
+ return ret;
+}
+
+/*
+ * psci_idle_init - Initializes PSCI cpuidle driver
+ *
+ * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * to register cpuidle driver then rollback to cancel all CPUs
+ * registration.
+ */
+static int __init psci_idle_init(void)
+{
+ int cpu, ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(cpu) {
+ ret = psci_idle_init_cpu(cpu);
+ if (ret)
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ while (--cpu >= 0) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
+ cpuidle_unregister(drv);
+ kfree(drv);
+ }
+
+ return ret;
+}
+device_initcall(psci_idle_init);
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 9e56bc4..74c2479 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -247,7 +247,13 @@
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- if (lppaca_shared_proc(get_lppaca())) {
+ /*
+	 * Use local_paca instead of get_lppaca() because preemption is
+	 * not disabled here, and it is not required anyway: lppaca_ptr
+	 * does not need to be the one associated with the current CPU,
+	 * it can be from any CPU.
+ */
+ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index 7941a09..a2d34be 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
*
* Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
* and Jonas Aaberg <jonas.aberg@stericsson.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 6f4257f..a79610e 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2013 Xilinx
*
@@ -5,18 +6,6 @@
*
* based on arch/arm/mach-at91/cpuidle.c
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* The cpu idle uses wait-for-interrupt and RAM self refresh in order
* to implement two idle states -
* #1 wait-for-interrupt
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6df894d..0895b98 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -202,7 +202,6 @@
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
- s64 diff;
/*
* Tell the time framework to switch to a broadcast timer because our
@@ -247,19 +246,49 @@
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- diff = ktime_us_delta(time_end, time_start);
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
if (entered_state >= 0) {
- /* Update cpuidle counters */
- /* This can be moved to within driver enter routine
+ s64 diff, delay = drv->states[entered_state].exit_latency;
+ int i;
+
+ /*
+ * Update cpuidle counters
+ * This can be moved to within driver enter routine,
* but that results in multiple copies of same code.
*/
+ diff = ktime_us_delta(time_end, time_start);
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int)diff;
dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
+
+ if (diff < drv->states[entered_state].target_residency) {
+ for (i = entered_state - 1; i >= 0; i--) {
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
+ continue;
+
+ /* Shallower states are enabled, so update. */
+ dev->states_usage[entered_state].above++;
+ break;
+ }
+ } else if (diff > delay) {
+ for (i = entered_state + 1; i < drv->state_count; i++) {
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
+ continue;
+
+ /*
+ * Update if a deeper state would have been a
+ * better match for the observed idle duration.
+ */
+ if (diff - delay >= drv->states[i].target_residency)
+ dev->states_usage[entered_state].below++;
+
+ break;
+ }
+ }
} else {
dev->last_residency = 0;
}
@@ -299,9 +328,23 @@
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int index)
{
+ int ret = 0;
+
+ /*
+	 * Store the next hrtimer, which becomes either the next tick or
+	 * the next timer event, whichever expires first. Additionally, to
+	 * make this data useful for consumers outside cpuidle, we rely on
+	 * the governor's ->select() callback having decided whether or not
+	 * to stop the tick.
+ */
+ WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
if (cpuidle_state_is_coupled(drv, index))
- return cpuidle_enter_state_coupled(dev, drv, index);
- return cpuidle_enter_state(dev, drv, index);
+ ret = cpuidle_enter_state_coupled(dev, drv, index);
+ else
+ ret = cpuidle_enter_state(dev, drv, index);
+
+ WRITE_ONCE(dev->next_hrtimer, 0);
+ return ret;
}
/**
@@ -319,6 +362,36 @@
}
/**
+ * cpuidle_poll_time - return the amount of time to poll for
+ * @drv: the cpuidle driver tied with the cpu
+ * @dev: the cpuidle device
+ *
+ * Governors can override dev->poll_limit_ns if necessary.
+ */
+u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ int i;
+ u64 limit_ns;
+
+ if (dev->poll_limit_ns)
+ return dev->poll_limit_ns;
+
+ limit_ns = TICK_NSEC;
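+	/*
+	 * The loop below leaves limit_ns at the target residency of the
+	 * deepest enabled state, falling back to TICK_NSEC when none is
+	 * enabled.
+	 */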
+ for (i = 1; i < drv->state_count; i++) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ }
+
+ dev->poll_limit_ns = limit_ns;
+
+ return dev->poll_limit_ns;
+}
+
+/**
* cpuidle_install_idle_handler - installs the cpuidle idle loop handler
*/
void cpuidle_install_idle_handler(void)
@@ -482,6 +555,7 @@
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
+ dev->next_hrtimer = 0;
}
/**
@@ -702,4 +776,5 @@
}
module_param(off, int, 0444);
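+/* Usable on the kernel command line as, e.g., "cpuidle.governor=teo". */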
+module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 2965ab3..9f336af 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -7,7 +7,9 @@
#define __DRIVER_CPUIDLE_H
/* For internal use only */
+extern char param_governor[];
extern struct cpuidle_governor *cpuidle_curr_governor;
+extern struct cpuidle_governor *cpuidle_prev_governor;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
@@ -21,6 +23,7 @@
extern void cpuidle_uninstall_idle_handler(void);
/* governors */
+extern struct cpuidle_governor *cpuidle_find_governor(const char *str);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index dc32f34..80c1a83 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -254,12 +254,25 @@
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
+ struct cpuidle_governor *gov;
int ret;
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_driver(drv);
spin_unlock(&cpuidle_driver_lock);
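+	/*
+	 * If the driver requests a specific governor and the user has not
+	 * forced one with cpuidle.governor=, switch to it and remember the
+	 * current governor so it can be restored when the driver goes away.
+	 */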
+ if (!ret && !strlen(param_governor) && drv->governor &&
+ (cpuidle_get_driver() == drv)) {
+ mutex_lock(&cpuidle_lock);
+ gov = cpuidle_find_governor(drv->governor);
+ if (gov) {
+ cpuidle_prev_governor = cpuidle_curr_governor;
+ if (cpuidle_switch_governor(gov) < 0)
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
@@ -274,9 +287,21 @@
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
+ bool enabled = (cpuidle_get_driver() == drv);
+
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_driver(drv);
spin_unlock(&cpuidle_driver_lock);
+
+ if (!enabled)
+ return;
+
+ mutex_lock(&cpuidle_lock);
+ if (cpuidle_prev_governor) {
+ if (!cpuidle_switch_governor(cpuidle_prev_governor))
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 53342b7..d06d21a 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* DT idle states parsing code.
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "DT idle-states: " fmt
@@ -22,16 +19,12 @@
#include "dt_idle_states.h"
static int init_state_node(struct cpuidle_state *idle_state,
- const struct of_device_id *matches,
+ const struct of_device_id *match_id,
struct device_node *state_node)
{
int err;
- const struct of_device_id *match_id;
const char *desc;
- match_id = of_match_node(matches, state_node);
- if (!match_id)
- return -ENODEV;
/*
* CPUidle drivers are expected to initialize the const void *data
* pointer of the passed in struct of_device_id array to the idle
@@ -160,6 +153,7 @@
{
struct cpuidle_state *idle_state;
struct device_node *state_node, *cpu_node;
+ const struct of_device_id *match_id;
int i, err = 0;
const cpumask_t *cpumask;
unsigned int state_idx = start_idx;
@@ -180,6 +174,12 @@
if (!state_node)
break;
+ match_id = of_match_node(matches, state_node);
+ if (!match_id) {
+ err = -ENODEV;
+ break;
+ }
+
if (!of_device_is_available(state_node)) {
of_node_put(state_node);
continue;
@@ -198,7 +198,7 @@
}
idle_state = &drv->states[state_idx++];
- err = init_state_node(idle_state, matches, state_node);
+ err = init_state_node(idle_state, match_id, state_node);
if (err) {
pr_err("Parsing idle state node %pOF failed with err %d\n",
state_node, err);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 9fed1b8..e9801f2 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -11,20 +11,24 @@
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/pm_qos.h>
#include "cpuidle.h"
+char param_governor[CPUIDLE_NAME_LEN];
+
LIST_HEAD(cpuidle_governors);
struct cpuidle_governor *cpuidle_curr_governor;
+struct cpuidle_governor *cpuidle_prev_governor;
/**
- * __cpuidle_find_governor - finds a governor of the specified name
+ * cpuidle_find_governor - finds a governor of the specified name
* @str: the name
*
* Must be called with cpuidle_lock acquired.
*/
-static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
+struct cpuidle_governor *cpuidle_find_governor(const char *str)
{
struct cpuidle_governor *gov;
@@ -84,11 +88,14 @@
return -ENODEV;
mutex_lock(&cpuidle_lock);
- if (__cpuidle_find_governor(gov->name) == NULL) {
+ if (cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
- cpuidle_curr_governor->rating < gov->rating)
+ !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
+ (cpuidle_curr_governor->rating < gov->rating &&
+ strncasecmp(param_governor, cpuidle_curr_governor->name,
+ CPUIDLE_NAME_LEN)))
cpuidle_switch_governor(gov);
}
mutex_unlock(&cpuidle_lock);
@@ -104,7 +111,7 @@
{
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
- int device_req = dev_pm_qos_raw_read_value(device);
+ int device_req = dev_pm_qos_raw_resume_latency(device);
return device_req < global_req ? device_req : global_req;
}
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 1b51272..63abb53 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -1,6 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for cpuidle governors.
#
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
+obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
+obj-$(CONFIG_CPU_IDLE_GOV_HALTPOLL) += haltpoll.o
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
new file mode 100644
index 0000000..7a703d2
--- /dev/null
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * haltpoll.c - haltpoll idle governor
+ *
+ * Copyright 2019 Red Hat, Inc. and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Authors: Marcelo Tosatti <mtosatti@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/kvm_para.h>
+
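+/* maximum halt-polling window, in ns (cap for the per-cpu poll_limit_ns) */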
+static unsigned int guest_halt_poll_ns __read_mostly = 200000;
+module_param(guest_halt_poll_ns, uint, 0644);
+
+/* division factor to shrink halt_poll_ns */
+static unsigned int guest_halt_poll_shrink __read_mostly = 2;
+module_param(guest_halt_poll_shrink, uint, 0644);
+
+/* multiplication factor to grow per-cpu poll_limit_ns */
+static unsigned int guest_halt_poll_grow __read_mostly = 2;
+module_param(guest_halt_poll_grow, uint, 0644);
+
+/* value in ns to start growing per-cpu poll_limit_ns */
+static unsigned int guest_halt_poll_grow_start __read_mostly = 50000;
+module_param(guest_halt_poll_grow_start, uint, 0644);
+
+/* allow shrinking guest halt poll */
+static bool guest_halt_poll_allow_shrink __read_mostly = true;
+module_param(guest_halt_poll_allow_shrink, bool, 0644);
+
+/**
+ * haltpoll_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
+ * @dev: the CPU
+ * @stop_tick: indication on whether or not to stop the tick
+ */
+static int haltpoll_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev,
+ bool *stop_tick)
+{
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+
+ if (!drv->state_count || latency_req == 0) {
+ *stop_tick = false;
+ return 0;
+ }
+
+ if (dev->poll_limit_ns == 0)
+ return 1;
+
+ /* Last state was poll? */
+ if (dev->last_state_idx == 0) {
+ /* Halt if no event occurred on poll window */
+		if (dev->poll_time_limit)
+ return 1;
+
+ *stop_tick = false;
+ /* Otherwise, poll again */
+ return 0;
+ }
+
+ *stop_tick = false;
+ /* Last state was halt: poll */
+ return 0;
+}
+
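+/*
+ * With the default parameters the per-cpu poll_limit_ns grows from 0 to
+ * 50 us, then 100 us, then 200 us while blocking intervals keep landing
+ * inside guest_halt_poll_ns (but beyond the current limit), and it is
+ * halved whenever the CPU blocks for longer than guest_halt_poll_ns.
+ */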
+static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+{
+ unsigned int val;
+	u64 block_ns = block_us * NSEC_PER_USEC;
+
+	/*
+	 * Grow dev->poll_limit_ns if
+	 * dev->poll_limit_ns < block_ns <= guest_halt_poll_ns
+	 */
+ if (block_ns > dev->poll_limit_ns && block_ns <= guest_halt_poll_ns) {
+ val = dev->poll_limit_ns * guest_halt_poll_grow;
+
+ if (val < guest_halt_poll_grow_start)
+ val = guest_halt_poll_grow_start;
+ if (val > guest_halt_poll_ns)
+ val = guest_halt_poll_ns;
+
+ dev->poll_limit_ns = val;
+ } else if (block_ns > guest_halt_poll_ns &&
+ guest_halt_poll_allow_shrink) {
+ unsigned int shrink = guest_halt_poll_shrink;
+
+ val = dev->poll_limit_ns;
+ if (shrink == 0)
+ val = 0;
+ else
+ val /= shrink;
+ dev->poll_limit_ns = val;
+ }
+}
+
+/**
+ * haltpoll_reflect - update variables and update poll time
+ * @dev: the CPU
+ * @index: the index of actual entered state
+ */
+static void haltpoll_reflect(struct cpuidle_device *dev, int index)
+{
+ dev->last_state_idx = index;
+
+ if (index != 0)
+ adjust_poll_limit(dev, dev->last_residency);
+}
+
+/**
+ * haltpoll_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
+ * @dev: the CPU
+ */
+static int haltpoll_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ dev->poll_limit_ns = 0;
+
+ return 0;
+}
+
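+/*
+ * The low rating keeps haltpoll from being picked by default; the
+ * haltpoll cpuidle driver selects it explicitly through its ->governor
+ * field (see cpuidle_register_driver()).
+ */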
+static struct cpuidle_governor haltpoll_governor = {
+ .name = "haltpoll",
+ .rating = 9,
+ .enable = haltpoll_enable_device,
+ .select = haltpoll_select,
+ .reflect = haltpoll_reflect,
+};
+
+static int __init init_haltpoll(void)
+{
+ if (kvm_para_available())
+ return cpuidle_register_governor(&haltpoll_governor);
+
+ return 0;
+}
+
+postcore_initcall(init_haltpoll);
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 704880a..428eeb8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -38,7 +38,6 @@
struct ladder_device {
struct ladder_device_state states[CPUIDLE_STATE_MAX];
- int last_state_idx;
};
static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
@@ -49,12 +48,13 @@
* @old_idx: the current state index
* @new_idx: the new target state index
*/
-static inline void ladder_do_selection(struct ladder_device *ldev,
+static inline void ladder_do_selection(struct cpuidle_device *dev,
+ struct ladder_device *ldev,
int old_idx, int new_idx)
{
ldev->states[old_idx].stats.promotion_count = 0;
ldev->states[old_idx].stats.demotion_count = 0;
- ldev->last_state_idx = new_idx;
+ dev->last_state_idx = new_idx;
}
/**
@@ -68,19 +68,19 @@
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
- int last_residency, last_idx = ldev->last_state_idx;
+ int last_residency, last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = cpuidle_governor_latency_req(dev->cpu);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
- ladder_do_selection(ldev, last_idx, 0);
+ ladder_do_selection(dev, ldev, last_idx, 0);
return 0;
}
last_state = &ldev->states[last_idx];
- last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
+ last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
@@ -91,7 +91,7 @@
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
- ladder_do_selection(ldev, last_idx, last_idx + 1);
+ ladder_do_selection(dev, ldev, last_idx, last_idx + 1);
return last_idx + 1;
}
}
@@ -107,7 +107,7 @@
if (drv->states[i].exit_latency <= latency_req)
break;
}
- ladder_do_selection(ldev, last_idx, i);
+ ladder_do_selection(dev, ldev, last_idx, i);
return i;
}
@@ -116,7 +116,7 @@
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
- ladder_do_selection(ldev, last_idx, last_idx - 1);
+ ladder_do_selection(dev, ldev, last_idx, last_idx - 1);
return last_idx - 1;
}
}
@@ -139,7 +139,7 @@
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = first_idx;
+ dev->last_state_idx = first_idx;
for (i = first_idx; i < drv->state_count; i++) {
state = &drv->states[i];
@@ -167,9 +167,8 @@
*/
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
- struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
if (index > 0)
- ldev->last_state_idx = index;
+ dev->last_state_idx = index;
}
static struct cpuidle_governor ladder_governor = {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e26a409..e5a5d0c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* menu.c - the menu idle governor
*
@@ -5,9 +6,6 @@
* Copyright (C) 2009 Intel Corporation
* Author:
* Arjan van de Ven <arjan@linux.intel.com>
- *
- * This code is licenced under the GPL version 2 as described
- * in the COPYING file that acompanies the Linux Kernel.
*/
#include <linux/kernel.h>
@@ -119,27 +117,16 @@
*/
struct menu_device {
- int last_state_idx;
int needs_update;
int tick_wakeup;
unsigned int next_timer_us;
- unsigned int predicted_us;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
int interval_ptr;
};
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
-static inline int get_loadavg(unsigned long load)
-{
- return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
-}
-
static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
int bucket = 0;
@@ -173,18 +160,10 @@
* to be, the higher this multiplier, and thus the higher
* the barrier to go to an expensive C state.
*/
-static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
+static inline int performance_multiplier(unsigned long nr_iowaiters)
{
- int mult = 1;
-
- /* for higher loadavg, we are more reluctant */
-
- mult += 2 * get_loadavg(load);
-
- /* for IO wait tasks (per cpu!) we add 5x each */
- mult += 10 * nr_iowaiters;
-
- return mult;
+ /* for IO wait tasks (per cpu!) we add 10x each */
+ return 1 + 10 * nr_iowaiters;
}
static DEFINE_PER_CPU(struct menu_device, menu_devices);
@@ -197,17 +176,19 @@
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static unsigned int get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data,
+ unsigned int predicted_us)
{
int i, divisor;
- unsigned int max, thresh, avg;
+ unsigned int min, max, thresh, avg;
uint64_t sum, variance;
- thresh = UINT_MAX; /* Discard outliers above this value */
+ thresh = INT_MAX; /* Discard outliers above this value */
again:
/* First calculate the average of past intervals */
+ min = UINT_MAX;
max = 0;
sum = 0;
divisor = 0;
@@ -218,8 +199,19 @@
divisor++;
if (value > max)
max = value;
+
+ if (value < min)
+ min = value;
}
}
+
+ /*
+ * If the result of the computation is going to be discarded anyway,
+ * avoid the computation altogether.
+ */
+ if (min >= predicted_us)
+ return UINT_MAX;
+
if (divisor == INTERVALS)
avg = sum >> INTERVAL_SHIFT;
else
@@ -286,11 +278,10 @@
struct menu_device *data = this_cpu_ptr(&menu_devices);
int latency_req = cpuidle_governor_latency_req(dev->cpu);
int i;
- int first_idx;
int idx;
unsigned int interactivity_req;
- unsigned int expected_interval;
- unsigned long nr_iowaiters, cpu_load;
+ unsigned int predicted_us;
+ unsigned long nr_iowaiters;
ktime_t delta_next;
if (data->needs_update) {
@@ -298,50 +289,37 @@
data->needs_update = 0;
}
- /* Special case when user has set very strict latency requirement */
- if (unlikely(latency_req == 0)) {
- *stop_tick = false;
- return 0;
- }
-
/* determine the expected residency time, round up */
data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
- get_iowait_load(&nr_iowaiters, &cpu_load);
+ nr_iowaiters = nr_iowait_cpu(dev->cpu);
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+ if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
+ ((data->next_timer_us < drv->states[1].target_residency ||
+ latency_req < drv->states[1].exit_latency) &&
+ !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+ /*
+ * In this case state[0] will be used no matter what, so return
+ * it right away and keep the tick running if state[0] is a
+ * polling one.
+ */
+ *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
+ return 0;
+ }
+
/*
* Force the result of multiplication to be 64 bits even if both
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
+ predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
-
- expected_interval = get_typical_interval(data);
- expected_interval = min(expected_interval, data->next_timer_us);
-
- first_idx = 0;
- if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
- struct cpuidle_state *s = &drv->states[1];
- unsigned int polling_threshold;
-
- /*
- * Default to a physical idle state, not to busy polling, unless
- * a timer is going to trigger really really soon.
- */
- polling_threshold = max_t(unsigned int, 20, s->target_residency);
- if (data->next_timer_us > polling_threshold &&
- latency_req > s->exit_latency && !s->disabled &&
- !dev->states_usage[1].disable)
- first_idx = 1;
- }
-
/*
* Use the lowest expected idle interval to pick the idle state.
*/
- data->predicted_us = min(data->predicted_us, expected_interval);
+ predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
if (tick_nohz_tick_stopped()) {
/*
@@ -352,34 +330,46 @@
* the known time till the closest timer event for the idle
* state selection.
*/
- if (data->predicted_us < TICK_USEC)
- data->predicted_us = ktime_to_us(delta_next);
+ if (predicted_us < TICK_USEC)
+ predicted_us = ktime_to_us(delta_next);
} else {
/*
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
- expected_interval = data->predicted_us;
/*
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
idx = -1;
- for (i = first_idx; i < drv->state_count; i++) {
+ for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
+
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > data->predicted_us) {
- if (data->predicted_us < TICK_USEC)
+
+ if (s->target_residency > predicted_us) {
+ /*
+ * Use a physical idle state, not busy polling, unless
+ * a timer is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->exit_latency <= latency_req &&
+ s->target_residency <= data->next_timer_us) {
+ predicted_us = s->target_residency;
+ idx = i;
+ break;
+ }
+ if (predicted_us < TICK_USEC)
break;
if (!tick_nohz_tick_stopped()) {
@@ -389,7 +379,7 @@
* tick in that case and let the governor run
* again in the next iteration of the loop.
*/
- expected_interval = drv->states[idx].target_residency;
+ predicted_us = drv->states[idx].target_residency;
break;
}
@@ -403,18 +393,11 @@
s->target_residency <= ktime_to_us(delta_next))
idx = i;
- goto out;
+ return idx;
}
- if (s->exit_latency > latency_req) {
- /*
- * If we break out of the loop for latency reasons, use
- * the target residency of the selected state as the
- * expected idle duration so that the tick is retained
- * as long as that target residency is low enough.
- */
- expected_interval = drv->states[idx].target_residency;
+ if (s->exit_latency > latency_req)
break;
- }
+
idx = i;
}
@@ -426,7 +409,7 @@
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
+ predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
unsigned int delta_next_us = ktime_to_us(delta_next);
*stop_tick = false;
@@ -450,10 +433,7 @@
}
}
-out:
- data->last_state_idx = idx;
-
- return data->last_state_idx;
+ return idx;
}
/**
@@ -468,7 +448,7 @@
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- data->last_state_idx = index;
+ dev->last_state_idx = index;
data->needs_update = 1;
data->tick_wakeup = tick_nohz_idle_got_tick();
}
@@ -481,7 +461,7 @@
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- int last_idx = data->last_state_idx;
+ int last_idx = dev->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
unsigned int new_factor;
@@ -512,9 +492,19 @@
* duration predictor do a better job next time.
*/
measured_us = 9 * MAX_INTERESTING / 10;
+ } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
+ dev->poll_time_limit) {
+ /*
+ * The CPU exited the "polling" state due to a time limit, so
+ * the idle duration prediction leading to the selection of that
+ * state was inaccurate. If a better prediction had been made,
+ * the CPU might have been woken up from idle by the next timer.
+ * Assume that to be the case.
+ */
+ measured_us = data->next_timer_us;
} else {
/* measured value */
- measured_us = cpuidle_get_last_residency(dev);
+ measured_us = dev->last_residency;
/* Deduct exit latency */
if (measured_us > 2 * target->exit_latency)
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
new file mode 100644
index 0000000..b5a0e49
--- /dev/null
+++ b/drivers/cpuidle/governors/teo.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer events oriented CPU idle governor
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * The idea of this governor is based on the observation that on many systems
+ * timer events are two or more orders of magnitude more frequent than any
+ * other interrupts, so they are likely to be the most significant source of CPU
+ * wakeups from idle states. Moreover, information about what happened in the
+ * (relatively recent) past can be used to estimate whether or not the deepest
+ * idle state with target residency within the time to the closest timer is
+ * likely to be suitable for the upcoming idle time of the CPU and, if not, then
+ * which of the shallower idle states to choose.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases and
+ * they can be covered by taking a few most recent idle time intervals of the
+ * CPU into account. However, even in that case it is not necessary to consider
+ * idle duration values greater than the time till the closest timer, as the
+ * patterns that they may belong to produce average values close enough to
+ * the time till the closest timer (sleep length) anyway.
+ *
+ * Thus this governor estimates whether or not the upcoming idle time of the CPU
+ * is likely to be significantly shorter than the sleep length and selects an
+ * idle state for it in accordance with that, as follows:
+ *
+ * - Find an idle state on the basis of the sleep length and state statistics
+ * collected over time:
+ *
+ * o Find the deepest idle state whose target residency is less than or equal
+ * to the sleep length.
+ *
+ * o Select it if it matched both the sleep length and the observed idle
+ * duration in the past more often than it matched the sleep length alone
+ * (i.e. the observed idle duration was significantly shorter than the sleep
+ * length matched by it).
+ *
+ * o Otherwise, select the shallower state with the greatest matched "early"
+ * wakeups metric.
+ *
+ * - If the majority of the most recent idle duration values are below the
+ * target residency of the idle state selected so far, use those values to
+ * compute the new expected idle duration and find an idle state matching it
+ * (which has to be shallower than the one selected so far).
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/tick.h>
+
+/*
+ * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
+ * is used for decreasing metrics on a regular basis.
+ */
+#define PULSE 1024
+#define DECAY_SHIFT 3
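+/* For example, with DECAY_SHIFT == 3 a metric loses 1/8 of its value per update. */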
+
+/*
+ * Number of the most recent idle duration values to take into consideration for
+ * the detection of wakeup patterns.
+ */
+#define INTERVALS 8
+
+/**
+ * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
+ * @early_hits: "Early" CPU wakeups "matching" this state.
+ * @hits: "On time" CPU wakeups "matching" this state.
+ * @misses: CPU wakeups "missing" this state.
+ *
+ * A CPU wakeup is "matched" by a given idle state if the idle duration measured
+ * after the wakeup is between the target residency of that state and the target
+ * residency of the next one (or if this is the deepest available idle state, it
+ * "matches" a CPU wakeup when the measured idle duration is at least equal to
+ * its target residency).
+ *
+ * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
+ * it occurs significantly earlier than the closest expected timer event (that
+ * is, early enough to match an idle state shallower than the one matching the
+ * time till the closest timer event). Otherwise, the wakeup is "on time", or
+ * it is a "hit".
+ *
+ * A "miss" occurs when the given state doesn't match the wakeup, but it matches
+ * the time till the closest timer event used for idle state selection.
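+ *
+ * For example (illustrative numbers): with a sleep length of 500 us, a
+ * state A with target residency 100 us and a deeper state B with target
+ * residency 400 us, B matches the sleep length.  If the CPU then wakes
+ * up after only 120 us, the wakeup is "early": B records a "miss" and A
+ * records an "early hit".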
+ */
+struct teo_idle_state {
+ unsigned int early_hits;
+ unsigned int hits;
+ unsigned int misses;
+};
+
+/**
+ * struct teo_cpu - CPU data used by the TEO cpuidle governor.
+ * @time_span_ns: Time between idle state selection and post-wakeup update.
+ * @sleep_length_ns: Time till the closest timer event (at the selection time).
+ * @states: Idle states data corresponding to this CPU.
+ * @interval_idx: Index of the most recent saved idle interval.
+ * @intervals: Saved idle duration values.
+ */
+struct teo_cpu {
+ u64 time_span_ns;
+ u64 sleep_length_ns;
+ struct teo_idle_state states[CPUIDLE_STATE_MAX];
+ int interval_idx;
+ unsigned int intervals[INTERVALS];
+};
+
+static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+
+/**
+ * teo_update - Update CPU data after wakeup.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ */
+static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
+ int i, idx_hit = -1, idx_timer = -1;
+ unsigned int measured_us;
+
+ if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ /*
+ * One of the safety nets has triggered or the wakeup was close
+ * enough to the closest timer event expected at the idle state
+ * selection time to be discarded.
+ */
+ measured_us = UINT_MAX;
+ } else {
+ unsigned int lat;
+
+ lat = drv->states[dev->last_state_idx].exit_latency;
+
+ measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The delay between the wakeup and the first instruction
+ * executed by the CPU is not likely to be worst-case every
+ * time, so take 1/2 of the exit latency as a very rough
+ * approximation of the average of it.
+ */
+ if (measured_us >= lat)
+ measured_us -= lat / 2;
+ else
+ measured_us /= 2;
+ }
+
+ /*
+ * Decay the "early hits" metric for all of the states and find the
+ * states matching the sleep length and the measured idle duration.
+ */
+ for (i = 0; i < drv->state_count; i++) {
+ unsigned int early_hits = cpu_data->states[i].early_hits;
+
+ cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+
+ if (drv->states[i].target_residency <= sleep_length_us) {
+ idx_timer = i;
+ if (drv->states[i].target_residency <= measured_us)
+ idx_hit = i;
+ }
+ }
+
+ /*
+ * Update the "hits" and "misses" data for the state matching the sleep
+ * length. If it matches the measured idle duration too, this is a hit,
+ * so increase the "hits" metric for it then. Otherwise, this is a
+ * miss, so increase the "misses" metric for it. In the latter case
+ * also increase the "early hits" metric for the state that actually
+ * matches the measured idle duration.
+ */
+ if (idx_timer >= 0) {
+ unsigned int hits = cpu_data->states[idx_timer].hits;
+ unsigned int misses = cpu_data->states[idx_timer].misses;
+
+ hits -= hits >> DECAY_SHIFT;
+ misses -= misses >> DECAY_SHIFT;
+
+ if (idx_timer > idx_hit) {
+ misses += PULSE;
+ if (idx_hit >= 0)
+ cpu_data->states[idx_hit].early_hits += PULSE;
+ } else {
+ hits += PULSE;
+ }
+
+ cpu_data->states[idx_timer].misses = misses;
+ cpu_data->states[idx_timer].hits = hits;
+ }
+
+ /*
+ * Save idle duration values corresponding to non-timer wakeups for
+ * pattern detection.
+ */
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+	if (cpu_data->interval_idx >= INTERVALS)
+ cpu_data->interval_idx = 0;
+}
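+
+/*
+ * Illustrative example (hypothetical numbers): with target residencies of
+ * {1, 50, 400} us, sleep_length_us = 600 and measured_us = 30, teo_update()
+ * finds idx_timer = 2 and idx_hit = 0, so state 2 collects a "miss" PULSE
+ * and state 0 collects an "early hit" PULSE.
+ */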
+
+/**
+ * teo_find_shallower_state - Find shallower idle state matching given duration.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @state_idx: Index of the capping idle state.
+ * @duration_us: Idle duration value to match.
+ */
+static int teo_find_shallower_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int state_idx,
+ unsigned int duration_us)
+{
+ int i;
+
+ for (i = state_idx - 1; i >= 0; i--) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ state_idx = i;
+ if (drv->states[i].target_residency <= duration_us)
+ break;
+ }
+ return state_idx;
+}
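+
+/*
+ * Behavior note: the loop above scans downwards from state_idx - 1, skipping
+ * disabled states, and stops at the first enabled state whose target
+ * residency fits duration_us; if none fits, the shallowest enabled state
+ * visited is returned.
+ */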
+
+/**
+ * teo_select - Selects the next idle state to enter.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @stop_tick: Indication on whether or not to stop the scheduler tick.
+ */
+static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ unsigned int duration_us, count;
+ int max_early_idx, constraint_idx, idx, i;
+ ktime_t delta_tick;
+
+ if (dev->last_state_idx >= 0) {
+ teo_update(drv, dev);
+ dev->last_state_idx = -1;
+ }
+
+ cpu_data->time_span_ns = local_clock();
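+ /*
+ * Note: time_span_ns holds a timestamp at this point; teo_reflect()
+ * below converts it into the measured idle duration by subtracting
+ * it from local_clock() at wakeup time.
+ */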
+
+ cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
+ duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+
+ count = 0;
+ max_early_idx = -1;
+ constraint_idx = drv->state_count;
+ idx = -1;
+
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+ if (s->disabled || su->disable) {
+ /*
+ * If the "early hits" metric of a disabled state is
+ * greater than the current maximum, it should be taken
+ * into account, because it would be a mistake to select
+ * a deeper state with lower "early hits" metric. The
+ * index cannot be changed to point to it, however, so
+ * just increase the max count alone and let the index
+ * still point to a shallower idle state.
+ */
+ if (max_early_idx >= 0 &&
+ count < cpu_data->states[i].early_hits)
+ count = cpu_data->states[i].early_hits;
+
+ continue;
+ }
+
+ if (idx < 0)
+ idx = i; /* first enabled state */
+
+ if (s->target_residency > duration_us)
+ break;
+
+ if (s->exit_latency > latency_req && constraint_idx > i)
+ constraint_idx = i;
+
+ idx = i;
+
+ if (count < cpu_data->states[i].early_hits &&
+ !(tick_nohz_tick_stopped() &&
+ drv->states[i].target_residency < TICK_USEC)) {
+ count = cpu_data->states[i].early_hits;
+ max_early_idx = i;
+ }
+ }
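+
+ /*
+ * Note: the tick_nohz_tick_stopped() check above avoids promoting
+ * states shallower than the tick period once the tick has been
+ * stopped, as the CPU might otherwise linger in a too-shallow state
+ * for a long time.
+ */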
+
+ /*
+ * If the "hits" metric of the idle state matching the sleep length is
+ * greater than its "misses" metric, that is the one to use. Otherwise,
+ * it is more likely that one of the shallower states will match the
+ * idle duration observed after wakeup, so take the one with the maximum
+ * "early hits" metric, but if that cannot be determined, just use the
+ * state selected so far.
+ */
+ if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
+ max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_us = drv->states[idx].target_residency;
+ }
+
+ /*
+ * If there is a latency constraint, it may be necessary to use a
+ * shallower idle state than the one selected so far.
+ */
+ if (constraint_idx < idx)
+ idx = constraint_idx;
+
+ if (idx < 0) {
+ idx = 0; /* No states enabled. Must use 0. */
+ } else if (idx > 0) {
+ u64 sum = 0;
+
+ count = 0;
+
+ /*
+ * Count and sum the most recent idle duration values less than
+ * the current expected idle duration value.
+ */
+ for (i = 0; i < INTERVALS; i++) {
+ unsigned int val = cpu_data->intervals[i];
+
+ if (val >= duration_us)
+ continue;
+
+ count++;
+ sum += val;
+ }
+
+ /*
+ * Give up unless the majority of the most recent idle duration
+ * values are in the interesting range.
+ */
+ if (count > INTERVALS / 2) {
+ unsigned int avg_us = div64_u64(sum, count);
+
+ /*
+ * Avoid spending too much time in an idle state that
+ * would be too shallow.
+ */
+ if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+ duration_us = avg_us;
+ if (drv->states[idx].target_residency > avg_us)
+ idx = teo_find_shallower_state(drv, dev,
+ idx, avg_us);
+ }
+ }
+ }
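+
+ /*
+ * Illustrative example (hypothetical numbers): assuming INTERVALS = 8
+ * as defined earlier in this file, if five saved intervals are below
+ * duration_us and sum to 250 us, then count = 5 > 4 and avg_us = 50,
+ * so a state with a target residency of up to 50 us may be picked
+ * instead.
+ */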
+
+ /*
+ * Don't stop the tick if the selected state is a polling one or if the
+ * expected idle duration is shorter than the tick period length.
+ */
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
+ unsigned int delta_tick_us = ktime_to_us(delta_tick);
+
+ *stop_tick = false;
+
+ /*
+ * The tick is not going to be stopped, so if the target
+ * residency of the state to be returned is not within the time
+ * till the closest timer including the tick, try to correct
+ * that.
+ */
+ if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ }
+
+ return idx;
+}
+
+/**
+ * teo_reflect - Note that governor data for the CPU need to be updated.
+ * @dev: Target CPU.
+ * @state: Entered state.
+ */
+static void teo_reflect(struct cpuidle_device *dev, int state)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+
+ dev->last_state_idx = state;
+ /*
+ * If the wakeup was not "natural", but triggered by one of the safety
+ * nets, assume that the CPU might have been idle for the entire sleep
+ * length time.
+ */
+ if (dev->poll_time_limit ||
+ (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ dev->poll_time_limit = false;
+ cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ } else {
+ cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ }
+}
+
+/**
+ * teo_enable_device - Initialize the governor's data for the target CPU.
+ * @drv: cpuidle driver (not used).
+ * @dev: Target CPU.
+ */
+static int teo_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int i;
+
+ memset(cpu_data, 0, sizeof(*cpu_data));
+
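+ /*
+ * Start with all saved intervals at UINT_MAX: such values never pass
+ * the "val >= duration_us" filter in teo_select(), so a freshly
+ * enabled CPU contributes no pattern data until real wakeups have
+ * been recorded.
+ */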
+ for (i = 0; i < INTERVALS; i++)
+ cpu_data->intervals[i] = UINT_MAX;
+
+ return 0;
+}
+
+static struct cpuidle_governor teo_governor = {
+ .name = "teo",
+ .rating = 19,
+ .enable = teo_enable_device,
+ .select = teo_select,
+ .reflect = teo_reflect,
+};
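+
+/*
+ * Note: rating 19 is deliberately below the menu governor's 20, so menu
+ * remains the default when both are built in; teo has to be selected
+ * explicitly, e.g. with the cpuidle.governor=teo kernel command line
+ * parameter.
+ */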
+
+static int __init teo_governor_init(void)
+{
+ return cpuidle_register_governor(&teo_governor);
+}
+
+postcore_initcall(teo_governor_init);
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 3f86d23..c8fa5f4 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* poll_state.c - Polling idle state
- *
- * This file is released under the GPLv2.
*/
#include <linux/cpuidle.h>
@@ -9,7 +8,6 @@
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
-#define POLL_IDLE_TIME_LIMIT (TICK_NSEC / 16)
#define POLL_IDLE_RELAX_COUNT 200
static int __cpuidle poll_idle(struct cpuidle_device *dev,
@@ -17,9 +15,14 @@
{
u64 time_start = local_clock();
+ dev->poll_time_limit = false;
+
local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
+ u64 limit;
+
+ limit = cpuidle_poll_time(drv, dev);
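+
+ /*
+ * Note (assumption about the helper): cpuidle_poll_time() derives the
+ * limit from the target residency of the shallowest enabled
+ * non-polling state and caches it in dev->poll_limit_ns; the sysfs
+ * store path below resets that cache whenever a state's disable
+ * setting changes.
+ */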
while (!need_resched()) {
cpu_relax();
@@ -27,8 +30,10 @@
continue;
loop_count = 0;
- if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT)
+ if (local_clock() - time_start > limit) {
+ dev->poll_time_limit = true;
break;
+ }
}
}
current_clr_polling();
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index e754c7a..2bb2683 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -301,6 +301,8 @@
define_show_state_str_function(desc)
define_show_state_ull_function(disable)
define_store_state_ull_function(disable)
+define_show_state_ull_function(above)
+define_show_state_ull_function(below)
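+
+/*
+ * Note: "above" counts wakeups where this state turned out to be too deep
+ * (the measured idle time fell short of its target residency), while
+ * "below" counts wakeups where it was too shallow (the idle time would
+ * have justified a deeper state).
+ */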
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
@@ -310,6 +312,8 @@
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
+define_one_state_ro(above, show_state_above);
+define_one_state_ro(below, show_state_below);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -320,6 +324,8 @@
&attr_usage.attr,
&attr_time.attr,
&attr_disable.attr,
+ &attr_above.attr,
+ &attr_below.attr,
NULL
};
@@ -328,6 +334,7 @@
struct cpuidle_state_usage *state_usage;
struct completion kobj_unregister;
struct kobject kobj;
+ struct cpuidle_device *device;
};
#ifdef CONFIG_SUSPEND
@@ -385,6 +392,7 @@
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
+#define kobj_to_device(k) (kobj_to_state_obj(k)->device)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
@@ -408,10 +416,14 @@
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
+ struct cpuidle_device *dev = kobj_to_device(kobj);
if (cattr->store)
ret = cattr->store(state, state_usage, buf, size);
+ /* reset poll time cache */
+ dev->poll_limit_ns = 0;
+
return ret;
}
@@ -462,6 +474,7 @@
}
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
+ kobj->device = device;
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,