v4.19.13 snapshot.
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
new file mode 100644
index 0000000..3e63a90
--- /dev/null
+++ b/drivers/base/Kconfig
@@ -0,0 +1,279 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Generic Driver Options"
+
+config UEVENT_HELPER
+	bool "Support for uevent helper"
+	default y
+	help
+	  The uevent helper program is forked by the kernel for
+	  every uevent.
+	  Before the switch to the netlink-based uevent source, this was
+	  used to hook hotplug scripts into kernel device events. It
+	  usually pointed to a shell script at /sbin/hotplug.
+	  This should not be used today, because typical systems create
+	  many events at bootup or device discovery in a very short time
+	  frame. One forked process per event can create so many processes
+	  that it creates a high system load, or on smaller systems
+	  it is known to create out-of-memory situations during bootup.
+
+config UEVENT_HELPER_PATH
+	string "path to uevent helper"
+	depends on UEVENT_HELPER
+	default ""
+	help
+	  To disable user space helper program execution by default,
+	  specify an empty string here. This setting can still be altered
+	  via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
+	  later at runtime.
+
+config DEVTMPFS
+	bool "Maintain a devtmpfs filesystem to mount at /dev"
+	help
+	  This creates a tmpfs/ramfs filesystem instance early at bootup.
+	  In this filesystem, the kernel driver core maintains device
+	  nodes with their default names and permissions for all
+	  registered devices with an assigned major/minor number.
+	  Userspace can modify the filesystem content as needed, add
+	  symlinks, and apply needed permissions.
+	  It provides a fully functional /dev directory, where usually
+	  udev runs on top, managing permissions and adding meaningful
+	  symlinks.
+	  In very limited environments, it may provide a sufficient
+	  functional /dev without any further help. It also allows simple
+	  rescue systems, and reliably handles dynamic major/minor numbers.
+
+	  Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+	  file system will be used instead.
+
+config DEVTMPFS_MOUNT
+	bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
+	depends on DEVTMPFS
+	help
+	  This will instruct the kernel to automatically mount the
+	  devtmpfs filesystem at /dev, directly after the kernel has
+	  mounted the root filesystem. The behavior can be overridden
+	  with the commandline parameter: devtmpfs.mount=0|1.
+	  This option does not affect initramfs-based booting; there,
+	  the devtmpfs filesystem always needs to be mounted manually
+	  after the rootfs is mounted.
+	  With this option enabled, it is possible to bring up a system
+	  in rescue mode with init=/bin/sh, even when the /dev directory
+	  on the rootfs is completely empty.
+
+config STANDALONE
+	bool "Select only drivers that don't need compile-time external firmware"
+	default y
+	help
+	  Select this option if you don't have magic firmware for drivers that
+	  need it.
+
+	  If unsure, say Y.
+
+config PREVENT_FIRMWARE_BUILD
+	bool "Disable drivers features which enable custom firmware building"
+	default y
+	help
+	  Say yes to disable driver features which enable building a custom
+	  driver firmware at kernel build time. These drivers do not use the
+	  kernel firmware API to load firmware (CONFIG_FW_LOADER), instead they
+	  use their own custom loading mechanism. The required firmware is
+	  usually shipped with the driver; building the driver firmware
+	  should only be needed if you have an updated firmware source.
+
+	  Firmware should not be built as part of the kernel; these days
+	  you should always prevent this and say Y here. There are only two
+	  old drivers which enable building of their firmware at kernel
+	  build time:
+
+	    o CONFIG_WANXL through CONFIG_WANXL_BUILD_FIRMWARE
+	    o CONFIG_SCSI_AIC79XX through CONFIG_AIC79XX_BUILD_FIRMWARE
+
+source "drivers/base/firmware_loader/Kconfig"
+
+config WANT_DEV_COREDUMP
+	bool
+	help
+	  Drivers should "select" this option if they desire to use the
+	  device coredump mechanism.
+
+config ALLOW_DEV_COREDUMP
+	bool "Allow device coredump" if EXPERT
+	default y
+	help
+	  This option controls if the device coredump mechanism is available or
+	  not; if disabled, the mechanism will be omitted even if drivers that
+	  can use it are enabled.
+	  Say 'N' on sensitive systems, or on systems that should never
+	  access the information, so that neither the code nor any data
+	  is kept around.
+
+	  If unsure, say Y.
+
+config DEV_COREDUMP
+	bool
+	default y if WANT_DEV_COREDUMP
+	depends on ALLOW_DEV_COREDUMP
+
+config DEBUG_DRIVER
+	bool "Driver Core verbose debug messages"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here if you want the Driver core to produce a bunch of
+	  debug messages to the system log. Select this if you are having a
+	  problem with the driver core and want to see more of what is
+	  going on.
+
+	  If you are unsure about this, say N here.
+
+config DEBUG_DEVRES
+	bool "Managed device resources verbose debug messages"
+	depends on DEBUG_KERNEL
+	help
+	  This option enables kernel parameter devres.log. If set to
+	  non-zero, devres debug messages are printed. Select this if
+	  you are having a problem with devres or want to debug
+	  resource management for a managed device. devres.log can also
+	  be switched on and off at runtime via its sysfs node.
+
+	  If you are unsure about this, say N here.
+
+config DEBUG_TEST_DRIVER_REMOVE
+	bool "Test driver remove calls during probe (UNSTABLE)"
+	depends on DEBUG_KERNEL
+	help
+	  Say Y here if you want the Driver core to test driver remove functions
+	  by calling probe, remove, probe. This tests the remove path without
+	  having to unbind the driver or unload the driver module.
+
+	  This option is expected to find errors and may render your system
+	  unusable. You should say N here unless you are explicitly looking to
+	  test this functionality.
+
+source "drivers/base/test/Kconfig"
+
+config SYS_HYPERVISOR
+	bool
+	default n
+
+config GENERIC_CPU_DEVICES
+	bool
+	default n
+
+config GENERIC_CPU_AUTOPROBE
+	bool
+
+config GENERIC_CPU_VULNERABILITIES
+	bool
+
+config SOC_BUS
+	bool
+	select GLOB
+
+source "drivers/base/regmap/Kconfig"
+
+config DMA_SHARED_BUFFER
+	bool
+	default n
+	select ANON_INODES
+	select IRQ_WORK
+	help
+	  This option enables the framework for buffer-sharing between
+	  multiple drivers. A buffer is associated with a file using
+	  driver APIs; the file descriptor can then be passed on to
+	  other drivers.
+
+config DMA_FENCE_TRACE
+	bool "Enable verbose DMA_FENCE_TRACE messages"
+	depends on DMA_SHARED_BUFFER
+	help
+	  Enable the DMA_FENCE_TRACE printks. This will add extra
+	  spam to the console log, but will make it easier to diagnose
+	  lockup related problems for dma-buffers shared across multiple
+	  devices.
+
+config DMA_CMA
+	bool "DMA Contiguous Memory Allocator"
+	depends on HAVE_DMA_CONTIGUOUS && CMA
+	help
+	  This enables the Contiguous Memory Allocator which allows drivers
+	  to allocate big physically-contiguous blocks of memory for use with
+	  hardware components that support neither I/O mapping nor
+	  scatter-gather.
+
+	  You can disable CMA by specifying "cma=0" on the kernel's command
+	  line.
+
+	  For more information see <include/linux/dma-contiguous.h>.
+	  If unsure, say "n".
+
+if DMA_CMA
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+	int "Size in Mega Bytes"
+	depends on !CMA_SIZE_SEL_PERCENTAGE
+	default 0 if X86
+	default 16
+	help
+	  Defines the size (in MiB) of the default memory area for Contiguous
+	  Memory Allocator.  If a size of 0 is selected, CMA is disabled by
+	  default, but it can be enabled by passing cma=size[MG] to the kernel.
+
+config CMA_SIZE_PERCENTAGE
+	int "Percentage of total memory"
+	depends on !CMA_SIZE_SEL_MBYTES
+	default 0 if X86
+	default 10
+	help
+	  Defines the size of the default memory area for Contiguous Memory
+	  Allocator as a percentage of the total memory in the system.
+	  If 0 percent is selected, CMA is disabled by default, but it can be
+	  enabled by passing cma=size[MG] to the kernel.
+
+choice
+	prompt "Selected region size"
+	default CMA_SIZE_SEL_MBYTES
+
+config CMA_SIZE_SEL_MBYTES
+	bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+	bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+	bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+	bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+	range 4 12
+	default 8
+	help
+	  The DMA mapping framework by default aligns all buffers to the
+	  smallest PAGE_SIZE order which is greater than or equal to the
+	  requested buffer size. This works well for buffers up to a few
+	  hundred kilobytes, but for larger buffers it is just a waste of
+	  memory. With this parameter you can specify the maximum PAGE_SIZE
+	  order for contiguous buffers. Larger buffers will be aligned only
+	  to this specified order. The order is expressed as a power of two
+	  multiplied by PAGE_SIZE.
+
+	  For example, if your system defaults to 4KiB pages, the order value
+	  of 8 means that the buffers will be aligned up to 1MiB only.
+
+	  If unsure, leave the default value "8".
+
+endif
+
+config GENERIC_ARCH_TOPOLOGY
+	bool
+	help
+	  Enable support for the common architecture topology code: e.g., parsing
+	  CPU capacity information from DT, usage of such information for
+	  appropriate scaling, sysfs interface for changing capacity values at
+	  runtime.
+
+endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
new file mode 100644
index 0000000..704f442
--- /dev/null
+++ b/drivers/base/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Linux device tree
+
+obj-y			:= component.o core.o bus.o dd.o syscore.o \
+			   driver.o class.o platform.o \
+			   cpu.o firmware.o init.o map.o devres.o \
+			   attribute_container.o transport_class.o \
+			   topology.o container.o property.o cacheinfo.o \
+			   devcon.o
+obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o
+obj-y			+= power/
+obj-$(CONFIG_ISA_BUS_API)	+= isa.o
+obj-y				+= firmware_loader/
+obj-$(CONFIG_NUMA)	+= node.o
+obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
+ifeq ($(CONFIG_SYSFS),y)
+obj-$(CONFIG_MODULES)	+= module.o
+endif
+obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_REGMAP)	+= regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
+obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
+
+obj-y			+= test/
+
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
new file mode 100644
index 0000000..e7cb0c6
--- /dev/null
+++ b/drivers/base/arch_topology.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arch specific cpu topology information
+ *
+ * Copyright (C) 2016, ARM Ltd.
+ * Written by: Juri Lelli, ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arch_topology.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sched/topology.h>
+
+DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+
+void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+			 unsigned long max_freq)
+{
+	unsigned long scale;
+	int i;
+
+	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+	for_each_cpu(i, cpus)
+		per_cpu(freq_scale, i) = scale;
+}
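+
+/*
+ * Illustrative sketch (driver and names made up): a frequency-scaling
+ * driver could feed this hook after a successful transition, so every
+ * CPU in the policy sees its capacity scaled by cur_freq/max_freq:
+ *
+ *	static int example_set_target(struct cpufreq_policy *policy,
+ *				      unsigned int index)
+ *	{
+ *		unsigned int new_freq = policy->freq_table[index].frequency;
+ *
+ *		... program the hardware to new_freq ...
+ *
+ *		arch_set_freq_scale(policy->related_cpus, new_freq,
+ *				    policy->cpuinfo.max_freq);
+ *		return 0;
+ *	}
+ */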
+
+static DEFINE_MUTEX(cpu_scale_mutex);
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int this_cpu = cpu->dev.id;
+	int i;
+	unsigned long new_capacity;
+	ssize_t ret;
+
+	if (!count)
+		return 0;
+
+	ret = kstrtoul(buf, 0, &new_capacity);
+	if (ret)
+		return ret;
+	if (new_capacity > SCHED_CAPACITY_SCALE)
+		return -EINVAL;
+
+	mutex_lock(&cpu_scale_mutex);
+	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+		topology_set_cpu_scale(i, new_capacity);
+	mutex_unlock(&cpu_scale_mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+	int i;
+	struct device *cpu;
+
+	for_each_possible_cpu(i) {
+		cpu = get_cpu_device(i);
+		if (!cpu) {
+			pr_err("%s: too early to get CPU%d device!\n",
+			       __func__, i);
+			continue;
+		}
+		device_create_file(cpu, &dev_attr_cpu_capacity);
+	}
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
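+
+/*
+ * Illustrative userspace sketch (path follows the conventional sysfs
+ * layout) reading the cpu_capacity attribute created above:
+ *
+ *	unsigned long cap;
+ *	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");
+ *
+ *	if (f) {
+ *		if (fscanf(f, "%lu", &cap) == 1)
+ *			printf("cpu0 capacity: %lu\n", cap);
+ *		fclose(f);
+ *	}
+ */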
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+
+static int free_raw_capacity(void)
+{
+	kfree(raw_capacity);
+	raw_capacity = NULL;
+
+	return 0;
+}
+
+void topology_normalize_cpu_scale(void)
+{
+	u64 capacity;
+	int cpu;
+
+	if (!raw_capacity)
+		return;
+
+	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	mutex_lock(&cpu_scale_mutex);
+	for_each_possible_cpu(cpu) {
+		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+			 cpu, raw_capacity[cpu]);
+		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+			/ capacity_scale;
+		topology_set_cpu_scale(cpu, capacity);
+		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+			cpu, topology_get_cpu_scale(NULL, cpu));
+	}
+	mutex_unlock(&cpu_scale_mutex);
+}
+
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+	static bool cap_parsing_failed;
+	int ret;
+	u32 cpu_capacity;
+
+	if (cap_parsing_failed)
+		return false;
+
+	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
+				   &cpu_capacity);
+	if (!ret) {
+		if (!raw_capacity) {
+			raw_capacity = kcalloc(num_possible_cpus(),
+					       sizeof(*raw_capacity),
+					       GFP_KERNEL);
+			if (!raw_capacity) {
+				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+				cap_parsing_failed = true;
+				return false;
+			}
+		}
+		capacity_scale = max(cpu_capacity, capacity_scale);
+		raw_capacity[cpu] = cpu_capacity;
+		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+			cpu_node, raw_capacity[cpu]);
+	} else {
+		if (raw_capacity) {
+			pr_err("cpu_capacity: missing %pOF raw capacity\n",
+				cpu_node);
+			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+		}
+		cap_parsing_failed = true;
+		free_raw_capacity();
+	}
+
+	return !ret;
+}
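+
+/*
+ * Illustrative device-tree fragment (values made up) consumed by the
+ * parser above; "capacity-dmips-mhz" is the property it reads:
+ *
+ *	cpu@0 {
+ *		device_type = "cpu";
+ *		compatible = "arm,cortex-a53";
+ *		capacity-dmips-mhz = <578>;
+ *	};
+ */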
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+			   unsigned long val,
+			   void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu;
+
+	if (!raw_capacity)
+		return 0;
+
+	if (val != CPUFREQ_NOTIFY)
+		return 0;
+
+	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+		 cpumask_pr_args(policy->related_cpus),
+		 cpumask_pr_args(cpus_to_visit));
+
+	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+	for_each_cpu(cpu, policy->related_cpus) {
+		raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+				    policy->cpuinfo.max_freq / 1000UL;
+		capacity_scale = max(raw_capacity[cpu], capacity_scale);
+	}
+
+	if (cpumask_empty(cpus_to_visit)) {
+		topology_normalize_cpu_scale();
+		free_raw_capacity();
+		pr_debug("cpu_capacity: parsing done\n");
+		schedule_work(&parsing_done_work);
+	}
+
+	return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+	.notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	int ret;
+
+	/*
+	 * on ACPI-based systems we need to use the default cpu capacity
+	 * until we have the necessary code to parse the cpu capacity, so
+	 * skip registering cpufreq notifier.
+	 */
+	if (!acpi_disabled || !raw_capacity)
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+		return -ENOMEM;
+	}
+
+	cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+					CPUFREQ_POLICY_NOTIFIER);
+
+	if (ret)
+		free_cpumask_var(cpus_to_visit);
+
+	return ret;
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+	free_cpumask_var(cpus_to_visit);
+}
+
+#else
+core_initcall(free_raw_capacity);
+#endif
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
new file mode 100644
index 0000000..20736aa
--- /dev/null
+++ b/drivers/base/attribute_container.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * attribute_container.c - implementation of a simple container for classes
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * The basic idea here is to enable a device to be attached to an
+ * aritrary numer of classes without having to allocate storage for them.
+ * Instead, the contained classes select the devices they need to attach
+ * to via a matching function.
+ */
+
+#include <linux/attribute_container.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "base.h"
+
+/* This is a private structure used to tie the classdev and the
+ * container; it should never be visible outside this file */
+struct internal_container {
+	struct klist_node node;
+	struct attribute_container *cont;
+	struct device classdev;
+};
+
+static void internal_container_klist_get(struct klist_node *n)
+{
+	struct internal_container *ic =
+		container_of(n, struct internal_container, node);
+	get_device(&ic->classdev);
+}
+
+static void internal_container_klist_put(struct klist_node *n)
+{
+	struct internal_container *ic =
+		container_of(n, struct internal_container, node);
+	put_device(&ic->classdev);
+}
+
+
+/**
+ * attribute_container_classdev_to_container - given a classdev, return the container
+ *
+ * @classdev: the class device created by attribute_container_add_device.
+ *
+ * Returns the container associated with this classdev.
+ */
+struct attribute_container *
+attribute_container_classdev_to_container(struct device *classdev)
+{
+	struct internal_container *ic =
+		container_of(classdev, struct internal_container, classdev);
+	return ic->cont;
+}
+EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+static LIST_HEAD(attribute_container_list);
+
+static DEFINE_MUTEX(attribute_container_mutex);
+
+/**
+ * attribute_container_register - register an attribute container
+ *
+ * @cont: The container to register.  This must be allocated by the
+ *        caller and should also be zeroed by it.
+ */
+int
+attribute_container_register(struct attribute_container *cont)
+{
+	INIT_LIST_HEAD(&cont->node);
+	klist_init(&cont->containers, internal_container_klist_get,
+		   internal_container_klist_put);
+
+	mutex_lock(&attribute_container_mutex);
+	list_add_tail(&cont->node, &attribute_container_list);
+	mutex_unlock(&attribute_container_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(attribute_container_register);
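+
+/*
+ * Illustrative sketch (hypothetical names): a subsystem supplies a
+ * match() callback and registers its container once at init time:
+ *
+ *	static int example_match(struct attribute_container *cont,
+ *				 struct device *dev)
+ *	{
+ *		return is_example_device(dev);	(hypothetical predicate)
+ *	}
+ *
+ *	static struct attribute_container example_cont = {
+ *		.class	= &example_class,	(hypothetical class)
+ *		.match	= example_match,
+ *	};
+ *
+ *	attribute_container_register(&example_cont);
+ */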
+
+/**
+ * attribute_container_unregister - remove a container registration
+ *
+ * @cont: previously registered container to remove
+ */
+int
+attribute_container_unregister(struct attribute_container *cont)
+{
+	int retval = -EBUSY;
+
+	mutex_lock(&attribute_container_mutex);
+	spin_lock(&cont->containers.k_lock);
+	if (!list_empty(&cont->containers.k_list))
+		goto out;
+	retval = 0;
+	list_del(&cont->node);
+ out:
+	spin_unlock(&cont->containers.k_lock);
+	mutex_unlock(&attribute_container_mutex);
+	return retval;
+
+}
+EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+/* private function used as class release */
+static void attribute_container_release(struct device *classdev)
+{
+	struct internal_container *ic
+		= container_of(classdev, struct internal_container, classdev);
+	struct device *dev = classdev->parent;
+
+	kfree(ic);
+	put_device(dev);
+}
+
+/**
+ * attribute_container_add_device - see if any container is interested in dev
+ *
+ * @dev: device to add attributes to
+ * @fn:	 function to trigger addition of class device.
+ *
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container).  If no
+ * fn is provided, the code will simply register the class device via
+ * device_add.  If a function is provided, it is expected to add
+ * the class device at the appropriate time.  One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it at a later time.  To do this, call this routine for
+ * allocation and initialisation and then use
+ * attribute_container_device_trigger() to call device_add() on
+ * it.  Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+void
+attribute_container_add_device(struct device *dev,
+			       int (*fn)(struct attribute_container *,
+					 struct device *,
+					 struct device *))
+{
+	struct attribute_container *cont;
+
+	mutex_lock(&attribute_container_mutex);
+	list_for_each_entry(cont, &attribute_container_list, node) {
+		struct internal_container *ic;
+
+		if (attribute_container_no_classdevs(cont))
+			continue;
+
+		if (!cont->match(cont, dev))
+			continue;
+
+		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
+		if (!ic) {
+			dev_err(dev, "failed to allocate class container\n");
+			continue;
+		}
+
+		ic->cont = cont;
+		device_initialize(&ic->classdev);
+		ic->classdev.parent = get_device(dev);
+		ic->classdev.class = cont->class;
+		cont->class->dev_release = attribute_container_release;
+		dev_set_name(&ic->classdev, "%s", dev_name(dev));
+		if (fn)
+			fn(cont, dev, &ic->classdev);
+		else
+			attribute_container_add_class_device(&ic->classdev);
+		klist_add_tail(&ic->node, &cont->containers);
+	}
+	mutex_unlock(&attribute_container_mutex);
+}
+
+/* FIXME: can't break out of this unless klist_iter_exit is also
+ * called before doing the break
+ */
+#define klist_for_each_entry(pos, head, member, iter) \
+	for (klist_iter_init(head, iter); (pos = ({ \
+		struct klist_node *n = klist_next(iter); \
+		n ? container_of(n, typeof(*pos), member) : \
+			({ klist_iter_exit(iter) ; NULL; }); \
+	})) != NULL;)
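+
+/*
+ * Illustrative early-exit pattern (hypothetical test): exit the
+ * iterator by hand before breaking, as
+ * attribute_container_find_class_device() below does:
+ *
+ *	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ *		if (found(ic)) {
+ *			klist_iter_exit(&iter);
+ *			break;
+ *		}
+ *	}
+ */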
+
+
+/**
+ * attribute_container_remove_device - make device eligible for removal.
+ *
+ * @dev:  The generic device
+ * @fn:	  A function to call to remove the device
+ *
+ * This routine triggers device removal.  If fn is NULL, then it is
+ * simply done via device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released).  If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+ * device_del() and then use attribute_container_device_trigger()
+ * to do the final put on the classdev.
+ */
+void
+attribute_container_remove_device(struct device *dev,
+				  void (*fn)(struct attribute_container *,
+					     struct device *,
+					     struct device *))
+{
+	struct attribute_container *cont;
+
+	mutex_lock(&attribute_container_mutex);
+	list_for_each_entry(cont, &attribute_container_list, node) {
+		struct internal_container *ic;
+		struct klist_iter iter;
+
+		if (attribute_container_no_classdevs(cont))
+			continue;
+
+		if (!cont->match(cont, dev))
+			continue;
+
+		klist_for_each_entry(ic, &cont->containers, node, &iter) {
+			if (dev != ic->classdev.parent)
+				continue;
+			klist_del(&ic->node);
+			if (fn)
+				fn(cont, dev, &ic->classdev);
+			else {
+				attribute_container_remove_attrs(&ic->classdev);
+				device_unregister(&ic->classdev);
+			}
+		}
+	}
+	mutex_unlock(&attribute_container_mutex);
+}
+
+/**
+ * attribute_container_device_trigger - execute a trigger for each matching classdev
+ *
+ * @dev:  The generic device to run the trigger for
+ * @fn:	  the function to execute for each classdev.
+ *
+ * This function is for executing a trigger when you need to know both
+ * the container and the classdev.  If you only care about the
+ * container, then use attribute_container_trigger() instead.
+ */
+void
+attribute_container_device_trigger(struct device *dev,
+				   int (*fn)(struct attribute_container *,
+					     struct device *,
+					     struct device *))
+{
+	struct attribute_container *cont;
+
+	mutex_lock(&attribute_container_mutex);
+	list_for_each_entry(cont, &attribute_container_list, node) {
+		struct internal_container *ic;
+		struct klist_iter iter;
+
+		if (!cont->match(cont, dev))
+			continue;
+
+		if (attribute_container_no_classdevs(cont)) {
+			fn(cont, dev, NULL);
+			continue;
+		}
+
+		klist_for_each_entry(ic, &cont->containers, node, &iter) {
+			if (dev == ic->classdev.parent)
+				fn(cont, dev, &ic->classdev);
+		}
+	}
+	mutex_unlock(&attribute_container_mutex);
+}
+
+/**
+ * attribute_container_trigger - trigger a function for each matching container
+ *
+ * @dev:  The generic device to activate the trigger for
+ * @fn:	  the function to trigger
+ *
+ * This routine triggers a function that only needs to know the
+ * matching containers (not the classdev) associated with a device.
+ * It is more lightweight than attribute_container_device_trigger, so
+ * should be used in preference unless the triggering function
+ * actually needs to know the classdev.
+ */
+void
+attribute_container_trigger(struct device *dev,
+			    int (*fn)(struct attribute_container *,
+				      struct device *))
+{
+	struct attribute_container *cont;
+
+	mutex_lock(&attribute_container_mutex);
+	list_for_each_entry(cont, &attribute_container_list, node) {
+		if (cont->match(cont, dev))
+			fn(cont, dev);
+	}
+	mutex_unlock(&attribute_container_mutex);
+}
+
+/**
+ * attribute_container_add_attrs - add attributes
+ *
+ * @classdev: The class device
+ *
+ * This simply creates all the class device sysfs files from the
+ * attributes listed in the container
+ */
+int
+attribute_container_add_attrs(struct device *classdev)
+{
+	struct attribute_container *cont =
+		attribute_container_classdev_to_container(classdev);
+	struct device_attribute **attrs = cont->attrs;
+	int i, error;
+
+	BUG_ON(attrs && cont->grp);
+
+	if (!attrs && !cont->grp)
+		return 0;
+
+	if (cont->grp)
+		return sysfs_create_group(&classdev->kobj, cont->grp);
+
+	for (i = 0; attrs[i]; i++) {
+		sysfs_attr_init(&attrs[i]->attr);
+		error = device_create_file(classdev, attrs[i]);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
+/**
+ * attribute_container_add_class_device - same function as device_add
+ *
+ * @classdev:	the class device to add
+ *
+ * This performs essentially the same function as device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+int
+attribute_container_add_class_device(struct device *classdev)
+{
+	int error = device_add(classdev);
+
+	if (error)
+		return error;
+	return attribute_container_add_attrs(classdev);
+}
+
+/**
+ * attribute_container_add_class_device_adapter - simple adapter for triggers
+ *
+ * This function is identical to attribute_container_add_class_device except
+ * that it is designed to be called from the triggers
+ */
+int
+attribute_container_add_class_device_adapter(struct attribute_container *cont,
+					     struct device *dev,
+					     struct device *classdev)
+{
+	return attribute_container_add_class_device(classdev);
+}
+
+/**
+ * attribute_container_remove_attrs - remove any attribute files
+ *
+ * @classdev: The class device to remove the files from
+ *
+ */
+void
+attribute_container_remove_attrs(struct device *classdev)
+{
+	struct attribute_container *cont =
+		attribute_container_classdev_to_container(classdev);
+	struct device_attribute **attrs = cont->attrs;
+	int i;
+
+	if (!attrs && !cont->grp)
+		return;
+
+	if (cont->grp) {
+		sysfs_remove_group(&classdev->kobj, cont->grp);
+		return;
+	}
+
+	for (i = 0; attrs[i]; i++)
+		device_remove_file(classdev, attrs[i]);
+}
+
+/**
+ * attribute_container_class_device_del - equivalent of class_device_del
+ *
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+ * device_del.
+ */
+void
+attribute_container_class_device_del(struct device *classdev)
+{
+	attribute_container_remove_attrs(classdev);
+	device_del(classdev);
+}
+
+/**
+ * attribute_container_find_class_device - find the corresponding class_device
+ *
+ * @cont:	the container
+ * @dev:	the generic device
+ *
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+struct device *
+attribute_container_find_class_device(struct attribute_container *cont,
+				      struct device *dev)
+{
+	struct device *cdev = NULL;
+	struct internal_container *ic;
+	struct klist_iter iter;
+
+	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+		if (ic->classdev.parent == dev) {
+			cdev = &ic->classdev;
+			/* FIXME: must exit iterator then break */
+			klist_iter_exit(&iter);
+			break;
+		}
+	}
+
+	return cdev;
+}
+EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
diff --git a/drivers/base/base.h b/drivers/base/base.h
new file mode 100644
index 0000000..7a419a7
--- /dev/null
+++ b/drivers/base/base.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/notifier.h>
+
+/**
+ * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
+ *
+ * @subsys - the struct kset that defines this subsystem
+ * @devices_kset - the subsystem's 'devices' directory
+ * @interfaces - list of subsystem interfaces associated
+ * @mutex - protect the devices and interfaces lists.
+ *
+ * @drivers_kset - the list of drivers associated
+ * @klist_devices - the klist to iterate over the @devices_kset
+ * @klist_drivers - the klist to iterate over the @drivers_kset
+ * @bus_notifier - the bus notifier list for anything that cares about things
+ *                 on this bus.
+ * @bus - pointer back to the struct bus_type that this structure is associated
+ *        with.
+ *
+ * @glue_dirs - "glue" directory to put in-between the parent device to
+ *              avoid namespace conflicts
+ * @class - pointer back to the struct class that this structure is associated
+ *          with.
+ *
+ * This structure is the one that is the actual kobject allowing struct
+ * bus_type/class to be statically allocated safely.  Nothing outside of the
+ * driver core should ever touch these fields.
+ */
+struct subsys_private {
+	struct kset subsys;
+	struct kset *devices_kset;
+	struct list_head interfaces;
+	struct mutex mutex;
+
+	struct kset *drivers_kset;
+	struct klist klist_devices;
+	struct klist klist_drivers;
+	struct blocking_notifier_head bus_notifier;
+	unsigned int drivers_autoprobe:1;
+	struct bus_type *bus;
+
+	struct kset glue_dirs;
+	struct class *class;
+};
+#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
+
+struct driver_private {
+	struct kobject kobj;
+	struct klist klist_devices;
+	struct klist_node knode_bus;
+	struct module_kobject *mkobj;
+	struct device_driver *driver;
+};
+#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+
+/**
+ * struct device_private - structure to hold the private to the driver core portions of the device structure.
+ *
+ * @klist_children - klist containing all children of this device
+ * @knode_parent - node in sibling list
+ * @knode_driver - node in driver list
+ * @knode_bus - node in bus list
+ * @deferred_probe - entry in deferred_probe_list which is used to retry the
+ *	binding of drivers which were unable to get all the resources needed by
+ *	the device; typically because it depends on another driver getting
+ *	probed first.
+ * @device - pointer back to the struct device that this structure is
+ * associated with.
+ *
+ * Nothing outside of the driver core should ever touch these fields.
+ */
+struct device_private {
+	struct klist klist_children;
+	struct klist_node knode_parent;
+	struct klist_node knode_driver;
+	struct klist_node knode_bus;
+	struct list_head deferred_probe;
+	struct device *device;
+};
+#define to_device_private_parent(obj)	\
+	container_of(obj, struct device_private, knode_parent)
+#define to_device_private_driver(obj)	\
+	container_of(obj, struct device_private, knode_driver)
+#define to_device_private_bus(obj)	\
+	container_of(obj, struct device_private, knode_bus)
+
+/* initialisation functions */
+extern int devices_init(void);
+extern int buses_init(void);
+extern int classes_init(void);
+extern int firmware_init(void);
+#ifdef CONFIG_SYS_HYPERVISOR
+extern int hypervisor_init(void);
+#else
+static inline int hypervisor_init(void) { return 0; }
+#endif
+extern int platform_bus_init(void);
+extern void cpu_dev_init(void);
+extern void container_dev_init(void);
+
+struct kobject *virtual_device_parent(struct device *dev);
+
+extern int bus_add_device(struct device *dev);
+extern void bus_probe_device(struct device *dev);
+extern void bus_remove_device(struct device *dev);
+
+extern int bus_add_driver(struct device_driver *drv);
+extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+					   struct device_driver *drv,
+					   struct device *parent);
+
+extern void driver_detach(struct device_driver *drv);
+extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+extern void driver_deferred_probe_del(struct device *dev);
+static inline int driver_match_device(struct device_driver *drv,
+				      struct device *dev)
+{
+	return drv->bus->match ? drv->bus->match(dev, drv) : 1;
+}
+extern bool driver_allows_async_probing(struct device_driver *drv);
+
+extern int driver_add_groups(struct device_driver *drv,
+			     const struct attribute_group **groups);
+extern void driver_remove_groups(struct device_driver *drv,
+				 const struct attribute_group **groups);
+
+extern char *make_class_name(const char *name, struct kobject *kobj);
+
+extern int devres_release_all(struct device *dev);
+extern void device_block_probing(void);
+extern void device_unblock_probing(void);
+
+/* /sys/devices directory */
+extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
+extern void module_add_driver(struct module *mod, struct device_driver *drv);
+extern void module_remove_driver(struct device_driver *drv);
+#else
+static inline void module_add_driver(struct module *mod,
+				     struct device_driver *drv) { }
+static inline void module_remove_driver(struct device_driver *drv) { }
+#endif
+
+#ifdef CONFIG_DEVTMPFS
+extern int devtmpfs_init(void);
+#else
+static inline int devtmpfs_init(void) { return 0; }
+#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
+
+/* device pm support */
+void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
new file mode 100644
index 0000000..8bfd27e
--- /dev/null
+++ b/drivers/base/bus.c
@@ -0,0 +1,1241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.c - bus driver management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+
+#include <linux/async.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include "base.h"
+#include "power/power.h"
+
+/* /sys/devices/system */
+static struct kset *system_kset;
+
+#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
+
+/*
+ * sysfs bindings for drivers
+ */
+
+#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+
+
+static int __must_check bus_rescan_devices_helper(struct device *dev,
+						void *data);
+
+static struct bus_type *bus_get(struct bus_type *bus)
+{
+	if (bus) {
+		kset_get(&bus->p->subsys);
+		return bus;
+	}
+	return NULL;
+}
+
+static void bus_put(struct bus_type *bus)
+{
+	if (bus)
+		kset_put(&bus->p->subsys);
+}
+
+static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct driver_attribute *drv_attr = to_drv_attr(attr);
+	struct driver_private *drv_priv = to_driver(kobj);
+	ssize_t ret = -EIO;
+
+	if (drv_attr->show)
+		ret = drv_attr->show(drv_priv->driver, buf);
+	return ret;
+}
+
+static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct driver_attribute *drv_attr = to_drv_attr(attr);
+	struct driver_private *drv_priv = to_driver(kobj);
+	ssize_t ret = -EIO;
+
+	if (drv_attr->store)
+		ret = drv_attr->store(drv_priv->driver, buf, count);
+	return ret;
+}
+
+static const struct sysfs_ops driver_sysfs_ops = {
+	.show	= drv_attr_show,
+	.store	= drv_attr_store,
+};
+
+static void driver_release(struct kobject *kobj)
+{
+	struct driver_private *drv_priv = to_driver(kobj);
+
+	pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__);
+	kfree(drv_priv);
+}
+
+static struct kobj_type driver_ktype = {
+	.sysfs_ops	= &driver_sysfs_ops,
+	.release	= driver_release,
+};
+
+/*
+ * sysfs bindings for buses
+ */
+static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct bus_attribute *bus_attr = to_bus_attr(attr);
+	struct subsys_private *subsys_priv = to_subsys_private(kobj);
+	ssize_t ret = 0;
+
+	if (bus_attr->show)
+		ret = bus_attr->show(subsys_priv->bus, buf);
+	return ret;
+}
+
+static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct bus_attribute *bus_attr = to_bus_attr(attr);
+	struct subsys_private *subsys_priv = to_subsys_private(kobj);
+	ssize_t ret = 0;
+
+	if (bus_attr->store)
+		ret = bus_attr->store(subsys_priv->bus, buf, count);
+	return ret;
+}
+
+static const struct sysfs_ops bus_sysfs_ops = {
+	.show	= bus_attr_show,
+	.store	= bus_attr_store,
+};
+
+int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+{
+	int error;
+	if (bus_get(bus)) {
+		error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
+		bus_put(bus);
+	} else
+		error = -EINVAL;
+	return error;
+}
+EXPORT_SYMBOL_GPL(bus_create_file);
+
+void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
+{
+	if (bus_get(bus)) {
+		sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
+		bus_put(bus);
+	}
+}
+EXPORT_SYMBOL_GPL(bus_remove_file);
+
+static void bus_release(struct kobject *kobj)
+{
+	struct subsys_private *priv = to_subsys_private(kobj);
+	struct bus_type *bus = priv->bus;
+
+	kfree(priv);
+	bus->p = NULL;
+}
+
+static struct kobj_type bus_ktype = {
+	.sysfs_ops	= &bus_sysfs_ops,
+	.release	= bus_release,
+};
+
+static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+	struct kobj_type *ktype = get_ktype(kobj);
+
+	if (ktype == &bus_ktype)
+		return 1;
+	return 0;
+}
+
+static const struct kset_uevent_ops bus_uevent_ops = {
+	.filter = bus_uevent_filter,
+};
+
+static struct kset *bus_kset;
+
+/* Manually detach a device from its associated driver. */
+static ssize_t unbind_store(struct device_driver *drv, const char *buf,
+			    size_t count)
+{
+	struct bus_type *bus = bus_get(drv->bus);
+	struct device *dev;
+	int err = -ENODEV;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev && dev->driver == drv) {
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_lock(dev->parent);
+		device_release_driver(dev);
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_unlock(dev->parent);
+		err = count;
+	}
+	put_device(dev);
+	bus_put(bus);
+	return err;
+}
+static DRIVER_ATTR_WO(unbind);
+
+/*
+ * Manually attach a device to a driver.
+ * Note: the driver must want to bind to the device,
+ * it is not possible to override the driver's id table.
+ */
+static ssize_t bind_store(struct device_driver *drv, const char *buf,
+			  size_t count)
+{
+	struct bus_type *bus = bus_get(drv->bus);
+	struct device *dev;
+	int err = -ENODEV;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
+		if (dev->parent && bus->need_parent_lock)
+			device_lock(dev->parent);
+		device_lock(dev);
+		err = driver_probe_device(drv, dev);
+		device_unlock(dev);
+		if (dev->parent && bus->need_parent_lock)
+			device_unlock(dev->parent);
+
+		if (err > 0) {
+			/* success */
+			err = count;
+		} else if (err == 0) {
+			/* driver didn't accept device */
+			err = -ENODEV;
+		}
+	}
+	put_device(dev);
+	bus_put(bus);
+	return err;
+}
+static DRIVER_ATTR_WO(bind);
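+
+/*
+ * Illustrative userspace sketch (bus, driver and device names made up):
+ * rebind a device by writing its name to the attributes above:
+ *
+ *	FILE *f = fopen("/sys/bus/pci/drivers/ahci/unbind", "w");
+ *
+ *	if (f) {
+ *		fprintf(f, "0000:00:1f.2");
+ *		fclose(f);
+ *	}
+ *	(then the same with "bind" to attach it again)
+ */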
+
+static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
+}
+
+static ssize_t store_drivers_autoprobe(struct bus_type *bus,
+				       const char *buf, size_t count)
+{
+	if (buf[0] == '0')
+		bus->p->drivers_autoprobe = 0;
+	else
+		bus->p->drivers_autoprobe = 1;
+	return count;
+}
+
+static ssize_t store_drivers_probe(struct bus_type *bus,
+				   const char *buf, size_t count)
+{
+	struct device *dev;
+	int err = -EINVAL;
+
+	dev = bus_find_device_by_name(bus, NULL, buf);
+	if (!dev)
+		return -ENODEV;
+	if (bus_rescan_devices_helper(dev, NULL) == 0)
+		err = count;
+	put_device(dev);
+	return err;
+}
+
+static struct device *next_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_next(i);
+	struct device *dev = NULL;
+	struct device_private *dev_prv;
+
+	if (n) {
+		dev_prv = to_device_private_bus(n);
+		dev = dev_prv->device;
+	}
+	return dev;
+}
+
+/**
+ * bus_for_each_dev - device iterator.
+ * @bus: bus type.
+ * @start: device to start iterating from.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over @bus's list of devices, and call @fn for each,
+ * passing it @data. If @start is not NULL, we use that device to
+ * begin iterating from.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ *
+ * NOTE: The device that returns a non-zero value is not retained
+ * in any way, nor is its refcount incremented. If the caller needs
+ * to retain this data, it should do so, and increment the reference
+ * count in the supplied callback.
+ */
+int bus_for_each_dev(struct bus_type *bus, struct device *start,
+		     void *data, int (*fn)(struct device *, void *))
+{
+	struct klist_iter i;
+	struct device *dev;
+	int error = 0;
+
+	if (!bus || !bus->p)
+		return -EINVAL;
+
+	klist_iter_init_node(&bus->p->klist_devices, &i,
+			     (start ? &start->p->knode_bus : NULL));
+	while (!error && (dev = next_device(&i)))
+		error = fn(dev, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(bus_for_each_dev);
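+
+/*
+ * Illustrative callback (hypothetical): count the devices on a bus by
+ * passing a counter through @data:
+ *
+ *	static int example_count(struct device *dev, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;	(keep iterating)
+ *	}
+ *
+ *	int count = 0;
+ *	bus_for_each_dev(&platform_bus_type, NULL, &count, example_count);
+ */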
+
+/**
+ * bus_find_device - device iterator for locating a particular device.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the bus_for_each_dev() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does.  If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct device *bus_find_device(struct bus_type *bus,
+			       struct device *start, void *data,
+			       int (*match)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *dev;
+
+	if (!bus || !bus->p)
+		return NULL;
+
+	klist_iter_init_node(&bus->p->klist_devices, &i,
+			     (start ? &start->p->knode_bus : NULL));
+	while ((dev = next_device(&i)))
+		if (match(dev, data) && get_device(dev))
+			break;
+	klist_iter_exit(&i);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device);
+
+static int match_name(struct device *dev, void *data)
+{
+	const char *name = data;
+
+	return sysfs_streq(name, dev_name(dev));
+}
+
+/**
+ * bus_find_device_by_name - device iterator for locating a particular device of a specific name
+ * @bus: bus type
+ * @start: Device to begin with
+ * @name: name of the device to match
+ *
+ * This is similar to the bus_find_device() function above, but it handles
+ * searching by a name automatically, no need to write another strcmp matching
+ * function.
+ */
+struct device *bus_find_device_by_name(struct bus_type *bus,
+				       struct device *start, const char *name)
+{
+	return bus_find_device(bus, start, (void *)name, match_name);
+}
+EXPORT_SYMBOL_GPL(bus_find_device_by_name);
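+
+/*
+ * Illustrative lookup (device name made up); the returned reference
+ * must be dropped with put_device() when done:
+ *
+ *	struct device *dev;
+ *
+ *	dev = bus_find_device_by_name(&platform_bus_type, NULL, "serial8250");
+ *	if (dev) {
+ *		... use dev ...
+ *		put_device(dev);
+ *	}
+ */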
+
+/**
+ * subsys_find_device_by_id - find a device with a specific enumeration number
+ * @subsys: subsystem
+ * @id: index 'id' in struct device
+ * @hint: device to check first
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to a full list search. Either way a reference for
+ * the returned object is taken.
+ */
+struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
+					struct device *hint)
+{
+	struct klist_iter i;
+	struct device *dev;
+
+	if (!subsys)
+		return NULL;
+
+	if (hint) {
+		klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
+		dev = next_device(&i);
+		if (dev && dev->id == id && get_device(dev)) {
+			klist_iter_exit(&i);
+			return dev;
+		}
+		klist_iter_exit(&i);
+	}
+
+	klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
+	while ((dev = next_device(&i))) {
+		if (dev->id == id && get_device(dev)) {
+			klist_iter_exit(&i);
+			return dev;
+		}
+	}
+	klist_iter_exit(&i);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
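+
+/*
+ * Illustrative use (cpu_subsys chosen as an example): pass the previous
+ * hit back as @hint to short-circuit a sequential scan:
+ *
+ *	struct device *dev = subsys_find_device_by_id(&cpu_subsys, id, prev);
+ */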
+
+static struct device_driver *next_driver(struct klist_iter *i)
+{
+	struct klist_node *n = klist_next(i);
+	struct driver_private *drv_priv;
+
+	if (n) {
+		drv_priv = container_of(n, struct driver_private, knode_bus);
+		return drv_priv->driver;
+	}
+	return NULL;
+}
+
+/**
+ * bus_for_each_drv - driver iterator
+ * @bus: bus we're dealing with.
+ * @start: driver to start iterating on.
+ * @data: data to pass to the callback.
+ * @fn: function to call for each driver.
+ *
+ * This is nearly identical to the device iterator above.
+ * We iterate over each driver that belongs to @bus, and call
+ * @fn for each. If @fn returns anything but 0, we break out
+ * and return it. If @start is not NULL, we use it as the head
+ * of the list.
+ *
+ * NOTE: we don't return the driver that returns a non-zero
+ * value, nor do we leave the reference count incremented for that
+ * driver. If the caller needs to know that info, it must set it
+ * in the callback. It must also be sure to increment the refcount
+ * so it doesn't disappear before returning to the caller.
+ */
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+		     void *data, int (*fn)(struct device_driver *, void *))
+{
+	struct klist_iter i;
+	struct device_driver *drv;
+	int error = 0;
+
+	if (!bus)
+		return -EINVAL;
+
+	klist_iter_init_node(&bus->p->klist_drivers, &i,
+			     start ? &start->p->knode_bus : NULL);
+	while ((drv = next_driver(&i)) && !error)
+		error = fn(drv, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(bus_for_each_drv);
+
+/**
+ * bus_add_device - add device to bus
+ * @dev: device being added
+ *
+ * - Add device's bus attributes.
+ * - Create links to device's bus.
+ * - Add the device to its bus's list of devices.
+ */
+int bus_add_device(struct device *dev)
+{
+	struct bus_type *bus = bus_get(dev->bus);
+	int error = 0;
+
+	if (bus) {
+		pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
+		error = device_add_groups(dev, bus->dev_groups);
+		if (error)
+			goto out_put;
+		error = sysfs_create_link(&bus->p->devices_kset->kobj,
+						&dev->kobj, dev_name(dev));
+		if (error)
+			goto out_groups;
+		error = sysfs_create_link(&dev->kobj,
+				&dev->bus->p->subsys.kobj, "subsystem");
+		if (error)
+			goto out_subsys;
+		klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
+	}
+	return 0;
+
+out_subsys:
+	sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+out_groups:
+	device_remove_groups(dev, bus->dev_groups);
+out_put:
+	bus_put(dev->bus);
+	return error;
+}
+
+/**
+ * bus_probe_device - probe drivers for a new device
+ * @dev: device to probe
+ *
+ * - Automatically probe for a driver if the bus allows it.
+ */
+void bus_probe_device(struct device *dev)
+{
+	struct bus_type *bus = dev->bus;
+	struct subsys_interface *sif;
+
+	if (!bus)
+		return;
+
+	if (bus->p->drivers_autoprobe)
+		device_initial_probe(dev);
+
+	mutex_lock(&bus->p->mutex);
+	list_for_each_entry(sif, &bus->p->interfaces, node)
+		if (sif->add_dev)
+			sif->add_dev(dev, sif);
+	mutex_unlock(&bus->p->mutex);
+}
+
+/**
+ * bus_remove_device - remove device from bus
+ * @dev: device to be removed
+ *
+ * - Remove device from all interfaces.
+ * - Remove symlink from bus' directory.
+ * - Delete device from bus's list.
+ * - Detach from its driver.
+ * - Drop reference taken in bus_add_device().
+ */
+void bus_remove_device(struct device *dev)
+{
+	struct bus_type *bus = dev->bus;
+	struct subsys_interface *sif;
+
+	if (!bus)
+		return;
+
+	mutex_lock(&bus->p->mutex);
+	list_for_each_entry(sif, &bus->p->interfaces, node)
+		if (sif->remove_dev)
+			sif->remove_dev(dev, sif);
+	mutex_unlock(&bus->p->mutex);
+
+	sysfs_remove_link(&dev->kobj, "subsystem");
+	sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
+			  dev_name(dev));
+	device_remove_groups(dev, dev->bus->dev_groups);
+	if (klist_node_attached(&dev->p->knode_bus))
+		klist_del(&dev->p->knode_bus);
+
+	pr_debug("bus: '%s': remove device %s\n",
+		 dev->bus->name, dev_name(dev));
+	device_release_driver(dev);
+	bus_put(dev->bus);
+}
+
+static int __must_check add_bind_files(struct device_driver *drv)
+{
+	int ret;
+
+	ret = driver_create_file(drv, &driver_attr_unbind);
+	if (ret == 0) {
+		ret = driver_create_file(drv, &driver_attr_bind);
+		if (ret)
+			driver_remove_file(drv, &driver_attr_unbind);
+	}
+	return ret;
+}
+
+static void remove_bind_files(struct device_driver *drv)
+{
+	driver_remove_file(drv, &driver_attr_bind);
+	driver_remove_file(drv, &driver_attr_unbind);
+}
+
+static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe);
+static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO,
+		show_drivers_autoprobe, store_drivers_autoprobe);
+
+static int add_probe_files(struct bus_type *bus)
+{
+	int retval;
+
+	retval = bus_create_file(bus, &bus_attr_drivers_probe);
+	if (retval)
+		goto out;
+
+	retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
+	if (retval)
+		bus_remove_file(bus, &bus_attr_drivers_probe);
+out:
+	return retval;
+}
+
+static void remove_probe_files(struct bus_type *bus)
+{
+	bus_remove_file(bus, &bus_attr_drivers_autoprobe);
+	bus_remove_file(bus, &bus_attr_drivers_probe);
+}
+
+static ssize_t uevent_store(struct device_driver *drv, const char *buf,
+			    size_t count)
+{
+	kobject_synth_uevent(&drv->p->kobj, buf, count);
+	return count;
+}
+static DRIVER_ATTR_WO(uevent);
+
+static void driver_attach_async(void *_drv, async_cookie_t cookie)
+{
+	struct device_driver *drv = _drv;
+	int ret;
+
+	ret = driver_attach(drv);
+
+	pr_debug("bus: '%s': driver %s async attach completed: %d\n",
+		 drv->bus->name, drv->name, ret);
+}
+
+/**
+ * bus_add_driver - Add a driver to the bus.
+ * @drv: driver.
+ */
+int bus_add_driver(struct device_driver *drv)
+{
+	struct bus_type *bus;
+	struct driver_private *priv;
+	int error = 0;
+
+	bus = bus_get(drv->bus);
+	if (!bus)
+		return -EINVAL;
+
+	pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		error = -ENOMEM;
+		goto out_put_bus;
+	}
+	klist_init(&priv->klist_devices, NULL, NULL);
+	priv->driver = drv;
+	drv->p = priv;
+	priv->kobj.kset = bus->p->drivers_kset;
+	error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
+				     "%s", drv->name);
+	if (error)
+		goto out_unregister;
+
+	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
+	if (drv->bus->p->drivers_autoprobe) {
+		if (driver_allows_async_probing(drv)) {
+			pr_debug("bus: '%s': probing driver %s asynchronously\n",
+				drv->bus->name, drv->name);
+			async_schedule(driver_attach_async, drv);
+		} else {
+			error = driver_attach(drv);
+			if (error)
+				goto out_unregister;
+		}
+	}
+	module_add_driver(drv->owner, drv);
+
+	error = driver_create_file(drv, &driver_attr_uevent);
+	if (error) {
+		printk(KERN_ERR "%s: uevent attr (%s) failed\n",
+			__func__, drv->name);
+	}
+	error = driver_add_groups(drv, bus->drv_groups);
+	if (error) {
+		/* How the hell do we get out of this pickle? Give up */
+		printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
+			__func__, drv->name);
+	}
+
+	if (!drv->suppress_bind_attrs) {
+		error = add_bind_files(drv);
+		if (error) {
+			/* Ditto */
+			printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
+				__func__, drv->name);
+		}
+	}
+
+	return 0;
+
+out_unregister:
+	kobject_put(&priv->kobj);
+	/* drv->p is freed in driver_release()  */
+	drv->p = NULL;
+out_put_bus:
+	bus_put(bus);
+	return error;
+}
+
+/**
+ * bus_remove_driver - delete driver from bus's knowledge.
+ * @drv: driver.
+ *
+ * Detach the driver from the devices it controls, and remove
+ * it from its bus's list of drivers. Finally, we drop the reference
+ * to the bus we took in bus_add_driver().
+ */
+void bus_remove_driver(struct device_driver *drv)
+{
+	if (!drv->bus)
+		return;
+
+	if (!drv->suppress_bind_attrs)
+		remove_bind_files(drv);
+	driver_remove_groups(drv, drv->bus->drv_groups);
+	driver_remove_file(drv, &driver_attr_uevent);
+	klist_remove(&drv->p->knode_bus);
+	pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
+	driver_detach(drv);
+	module_remove_driver(drv);
+	kobject_put(&drv->p->kobj);
+	bus_put(drv->bus);
+}
+
+/* Helper for bus_rescan_devices's iter */
+static int __must_check bus_rescan_devices_helper(struct device *dev,
+						  void *data)
+{
+	int ret = 0;
+
+	if (!dev->driver) {
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_lock(dev->parent);
+		ret = device_attach(dev);
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_unlock(dev->parent);
+	}
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * bus_rescan_devices - rescan devices on the bus for possible drivers
+ * @bus: the bus to scan.
+ *
+ * This function will look for devices on the bus with no driver
+ * attached and rescan them against existing drivers to see if they
+ * match any, by calling device_attach() for the unbound devices.
+ */
+int bus_rescan_devices(struct bus_type *bus)
+{
+	return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
+}
+EXPORT_SYMBOL_GPL(bus_rescan_devices);
+
+/**
+ * device_reprobe - remove driver for a device and probe for a new driver
+ * @dev: the device to reprobe
+ *
+ * This function detaches the attached driver (if any) for the given
+ * device and restarts the driver probing process.  It is intended
+ * to be used if the probing criteria changed during a device's
+ * lifetime and driver attachment should change accordingly.
+ */
+int device_reprobe(struct device *dev)
+{
+	if (dev->driver) {
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_lock(dev->parent);
+		device_release_driver(dev);
+		if (dev->parent && dev->bus->need_parent_lock)
+			device_unlock(dev->parent);
+	}
+	return bus_rescan_devices_helper(dev, NULL);
+}
+EXPORT_SYMBOL_GPL(device_reprobe);
+
+/**
+ * find_bus - locate bus by name.
+ * @name: name of bus.
+ *
+ * Call kset_find_obj() to iterate over list of buses to
+ * find a bus by name. Return bus if found.
+ *
+ * Note that kset_find_obj increments bus' reference count.
+ */
+#if 0
+struct bus_type *find_bus(char *name)
+{
+	struct kobject *k = kset_find_obj(bus_kset, name);
+	return k ? to_bus(k) : NULL;
+}
+#endif  /*  0  */
+
+static int bus_add_groups(struct bus_type *bus,
+			  const struct attribute_group **groups)
+{
+	return sysfs_create_groups(&bus->p->subsys.kobj, groups);
+}
+
+static void bus_remove_groups(struct bus_type *bus,
+			      const struct attribute_group **groups)
+{
+	sysfs_remove_groups(&bus->p->subsys.kobj, groups);
+}
+
+static void klist_devices_get(struct klist_node *n)
+{
+	struct device_private *dev_prv = to_device_private_bus(n);
+	struct device *dev = dev_prv->device;
+
+	get_device(dev);
+}
+
+static void klist_devices_put(struct klist_node *n)
+{
+	struct device_private *dev_prv = to_device_private_bus(n);
+	struct device *dev = dev_prv->device;
+
+	put_device(dev);
+}
+
+static ssize_t bus_uevent_store(struct bus_type *bus,
+				const char *buf, size_t count)
+{
+	kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
+	return count;
+}
+static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
+
+/**
+ * bus_register - register a driver-core subsystem
+ * @bus: bus to register
+ *
+ * This registers the bus with the kobject infrastructure, then
+ * registers the child subsystems it has: the devices and drivers
+ * that belong to the subsystem.
+ */
+int bus_register(struct bus_type *bus)
+{
+	int retval;
+	struct subsys_private *priv;
+	struct lock_class_key *key = &bus->lock_key;
+
+	priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->bus = bus;
+	bus->p = priv;
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
+
+	retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
+	if (retval)
+		goto out;
+
+	priv->subsys.kobj.kset = bus_kset;
+	priv->subsys.kobj.ktype = &bus_ktype;
+	priv->drivers_autoprobe = 1;
+
+	retval = kset_register(&priv->subsys);
+	if (retval)
+		goto out;
+
+	retval = bus_create_file(bus, &bus_attr_uevent);
+	if (retval)
+		goto bus_uevent_fail;
+
+	priv->devices_kset = kset_create_and_add("devices", NULL,
+						 &priv->subsys.kobj);
+	if (!priv->devices_kset) {
+		retval = -ENOMEM;
+		goto bus_devices_fail;
+	}
+
+	priv->drivers_kset = kset_create_and_add("drivers", NULL,
+						 &priv->subsys.kobj);
+	if (!priv->drivers_kset) {
+		retval = -ENOMEM;
+		goto bus_drivers_fail;
+	}
+
+	INIT_LIST_HEAD(&priv->interfaces);
+	__mutex_init(&priv->mutex, "subsys mutex", key);
+	klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
+	klist_init(&priv->klist_drivers, NULL, NULL);
+
+	retval = add_probe_files(bus);
+	if (retval)
+		goto bus_probe_files_fail;
+
+	retval = bus_add_groups(bus, bus->bus_groups);
+	if (retval)
+		goto bus_groups_fail;
+
+	pr_debug("bus: '%s': registered\n", bus->name);
+	return 0;
+
+bus_groups_fail:
+	remove_probe_files(bus);
+bus_probe_files_fail:
+	kset_unregister(bus->p->drivers_kset);
+bus_drivers_fail:
+	kset_unregister(bus->p->devices_kset);
+bus_devices_fail:
+	bus_remove_file(bus, &bus_attr_uevent);
+bus_uevent_fail:
+	kset_unregister(&bus->p->subsys);
+out:
+	kfree(bus->p);
+	bus->p = NULL;
+	return retval;
+}
+EXPORT_SYMBOL_GPL(bus_register);
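+
+/*
+ * Example (illustration only): a minimal sketch of registering a bus from a
+ * hypothetical "foo" subsystem's init code.  The foo names are assumptions.
+ */
+#if 0
+static struct bus_type foo_bus_type = {
+	.name = "foo",
+};
+
+static int __init foo_bus_init(void)
+{
+	/* creates /sys/bus/foo with its devices/ and drivers/ directories */
+	return bus_register(&foo_bus_type);
+}
+
+static void __exit foo_bus_exit(void)
+{
+	bus_unregister(&foo_bus_type);
+}
+#endif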
+
+/**
+ * bus_unregister - remove a bus from the system
+ * @bus: bus.
+ *
+ * Unregister the child subsystems and the bus itself; the final
+ * reference to the subsystem is dropped by kset_unregister().
+ */
+void bus_unregister(struct bus_type *bus)
+{
+	pr_debug("bus: '%s': unregistering\n", bus->name);
+	if (bus->dev_root)
+		device_unregister(bus->dev_root);
+	bus_remove_groups(bus, bus->bus_groups);
+	remove_probe_files(bus);
+	kset_unregister(bus->p->drivers_kset);
+	kset_unregister(bus->p->devices_kset);
+	bus_remove_file(bus, &bus_attr_uevent);
+	kset_unregister(&bus->p->subsys);
+}
+EXPORT_SYMBOL_GPL(bus_unregister);
+
+int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_register_notifier);
+
+int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_unregister_notifier);
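+
+/*
+ * Example (illustration only): a client listening for device events on a
+ * bus through the notifier chain above.  The action argument carries the
+ * BUS_NOTIFY_* values from <linux/device.h>; the foo names are assumptions.
+ */
+#if 0
+static int foo_bus_notify(struct notifier_block *nb, unsigned long action,
+			  void *data)
+{
+	struct device *dev = data;
+
+	dev_dbg(dev, "bus notifier: action %lu\n", action);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block foo_notifier = {
+	.notifier_call = foo_bus_notify,
+};
+
+/* registered with: bus_register_notifier(&foo_bus_type, &foo_notifier); */
+#endif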
+
+struct kset *bus_get_kset(struct bus_type *bus)
+{
+	return &bus->p->subsys;
+}
+EXPORT_SYMBOL_GPL(bus_get_kset);
+
+struct klist *bus_get_device_klist(struct bus_type *bus)
+{
+	return &bus->p->klist_devices;
+}
+EXPORT_SYMBOL_GPL(bus_get_device_klist);
+
+/*
+ * Yes, this forcibly breaks the klist abstraction temporarily.  It
+ * just wants to sort the klist, not change reference counts and
+ * take/drop locks rapidly in the process.  It does all this while
+ * holding the lock for the list, so objects can't otherwise be
+ * added/removed while we're swizzling.
+ */
+static void device_insertion_sort_klist(struct device *a, struct list_head *list,
+					int (*compare)(const struct device *a,
+							const struct device *b))
+{
+	struct klist_node *n;
+	struct device_private *dev_prv;
+	struct device *b;
+
+	list_for_each_entry(n, list, n_node) {
+		dev_prv = to_device_private_bus(n);
+		b = dev_prv->device;
+		if (compare(a, b) <= 0) {
+			list_move_tail(&a->p->knode_bus.n_node,
+				       &b->p->knode_bus.n_node);
+			return;
+		}
+	}
+	list_move_tail(&a->p->knode_bus.n_node, list);
+}
+
+void bus_sort_breadthfirst(struct bus_type *bus,
+			   int (*compare)(const struct device *a,
+					  const struct device *b))
+{
+	LIST_HEAD(sorted_devices);
+	struct klist_node *n, *tmp;
+	struct device_private *dev_prv;
+	struct device *dev;
+	struct klist *device_klist;
+
+	device_klist = bus_get_device_klist(bus);
+
+	spin_lock(&device_klist->k_lock);
+	list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
+		dev_prv = to_device_private_bus(n);
+		dev = dev_prv->device;
+		device_insertion_sort_klist(dev, &sorted_devices, compare);
+	}
+	list_splice(&sorted_devices, &device_klist->k_list);
+	spin_unlock(&device_klist->k_lock);
+}
+EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
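+
+/*
+ * Example (illustration only): a compare callback of the shape
+ * bus_sort_breadthfirst() expects -- strcmp-like, negative/zero/positive.
+ * Ordering by device name is just a stand-in for a real topological key.
+ */
+#if 0
+static int foo_compare_devs(const struct device *a, const struct device *b)
+{
+	return strcmp(dev_name(a), dev_name(b));
+}
+
+/* called as: bus_sort_breadthfirst(&foo_bus_type, foo_compare_devs); */
+#endif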
+
+/**
+ * subsys_dev_iter_init - initialize subsys device iterator
+ * @iter: subsys iterator to initialize
+ * @subsys: the subsys we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize subsys iterator @iter such that it iterates over devices
+ * of @subsys.  If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
+			  struct device *start, const struct device_type *type)
+{
+	struct klist_node *start_knode = NULL;
+
+	if (start)
+		start_knode = &start->p->knode_bus;
+	klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+	iter->type = type;
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
+
+/**
+ * subsys_dev_iter_next - iterate to the next device
+ * @iter: subsys iterator to proceed
+ *
+ * Proceed @iter to the next device and return it.  Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator proceeds to the next device or is exited.  The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into subsys code.
+ */
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+{
+	struct klist_node *knode;
+	struct device *dev;
+
+	for (;;) {
+		knode = klist_next(&iter->ki);
+		if (!knode)
+			return NULL;
+		dev = to_device_private_bus(knode)->device;
+		if (!iter->type || iter->type == dev->type)
+			return dev;
+	}
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
+
+/**
+ * subsys_dev_iter_exit - finish iteration
+ * @iter: subsys iterator to finish
+ *
+ * Finish an iteration.  Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+{
+	klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
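+
+/*
+ * Example (illustration only): the init/next/exit pattern the three
+ * iterator helpers above are meant to be used in.  foo_subsys is an
+ * assumed, already-registered bus_type.
+ */
+#if 0
+static void foo_walk_subsys(struct bus_type *foo_subsys)
+{
+	struct subsys_dev_iter iter;
+	struct device *dev;
+
+	subsys_dev_iter_init(&iter, foo_subsys, NULL, NULL);
+	while ((dev = subsys_dev_iter_next(&iter)))
+		dev_dbg(dev, "visited\n");	/* dev is referenced here */
+	subsys_dev_iter_exit(&iter);
+}
+#endif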
+
+int subsys_interface_register(struct subsys_interface *sif)
+{
+	struct bus_type *subsys;
+	struct subsys_dev_iter iter;
+	struct device *dev;
+
+	if (!sif || !sif->subsys)
+		return -ENODEV;
+
+	subsys = bus_get(sif->subsys);
+	if (!subsys)
+		return -EINVAL;
+
+	mutex_lock(&subsys->p->mutex);
+	list_add_tail(&sif->node, &subsys->p->interfaces);
+	if (sif->add_dev) {
+		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+		while ((dev = subsys_dev_iter_next(&iter)))
+			sif->add_dev(dev, sif);
+		subsys_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&subsys->p->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(subsys_interface_register);
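+
+/*
+ * Example (illustration only): a minimal subsys_interface.  add_dev() runs
+ * for every device already on the subsystem at registration time and for
+ * every device added later; the foo names are assumptions.
+ */
+#if 0
+static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	dev_dbg(dev, "foo interface attached\n");
+	return 0;
+}
+
+static void foo_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+	dev_dbg(dev, "foo interface detached\n");
+}
+
+static struct subsys_interface foo_interface = {
+	.name		= "foo",
+	.subsys		= &foo_bus_type,	/* assumed bus_type */
+	.add_dev	= foo_add_dev,
+	.remove_dev	= foo_remove_dev,
+};
+#endif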
+
+void subsys_interface_unregister(struct subsys_interface *sif)
+{
+	struct bus_type *subsys;
+	struct subsys_dev_iter iter;
+	struct device *dev;
+
+	if (!sif || !sif->subsys)
+		return;
+
+	subsys = sif->subsys;
+
+	mutex_lock(&subsys->p->mutex);
+	list_del_init(&sif->node);
+	if (sif->remove_dev) {
+		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+		while ((dev = subsys_dev_iter_next(&iter)))
+			sif->remove_dev(dev, sif);
+		subsys_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&subsys->p->mutex);
+
+	bus_put(subsys);
+}
+EXPORT_SYMBOL_GPL(subsys_interface_unregister);
+
+static void system_root_device_release(struct device *dev)
+{
+	kfree(dev);
+}
+
+static int subsys_register(struct bus_type *subsys,
+			   const struct attribute_group **groups,
+			   struct kobject *parent_of_root)
+{
+	struct device *dev;
+	int err;
+
+	err = bus_register(subsys);
+	if (err < 0)
+		return err;
+
+	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_dev;
+	}
+
+	err = dev_set_name(dev, "%s", subsys->name);
+	if (err < 0)
+		goto err_name;
+
+	dev->kobj.parent = parent_of_root;
+	dev->groups = groups;
+	dev->release = system_root_device_release;
+
+	err = device_register(dev);
+	if (err < 0)
+		goto err_dev_reg;
+
+	subsys->dev_root = dev;
+	return 0;
+
+err_dev_reg:
+	put_device(dev);
+	dev = NULL;	/* already freed by the release() callback */
+err_name:
+	kfree(dev);	/* no-op when coming from err_dev_reg */
+err_dev:
+	bus_unregister(subsys);
+	return err;
+}
+
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new; it exists only for
+ * compatibility with bad ideas. New subsystems should use plain
+ * subsystems, and subsystem-wide attributes should be added to the
+ * subsystem directory itself rather than to a fake root device placed
+ * in /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+			   const struct attribute_group **groups)
+{
+	return subsys_register(subsys, groups, &system_kset->kobj);
+}
+EXPORT_SYMBOL_GPL(subsys_system_register);
+
+/**
+ * subsys_virtual_register - register a subsystem at /sys/devices/virtual/
+ * @subsys: virtual subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
+ * with the name of the subsystem.  The root device can carry subsystem-wide
+ * attributes.  All registered devices are below this single root device.
+ * There's no restriction on device naming.  This is for kernel software
+ * constructs which need sysfs interface.
+ */
+int subsys_virtual_register(struct bus_type *subsys,
+			    const struct attribute_group **groups)
+{
+	struct kobject *virtual_dir;
+
+	virtual_dir = virtual_device_parent(NULL);
+	if (!virtual_dir)
+		return -ENOMEM;
+
+	return subsys_register(subsys, groups, virtual_dir);
+}
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
+
+int __init buses_init(void)
+{
+	bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
+	if (!bus_kset)
+		return -ENOMEM;
+
+	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
+	if (!system_kset)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
new file mode 100644
index 0000000..5d5b598
--- /dev/null
+++ b/drivers/base/cacheinfo.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cacheinfo support - processor cache information via sysfs
+ *
+ * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+
+/* pointer to per cpu cacheinfo */
+static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
+#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
+#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
+#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
+{
+	return ci_cacheinfo(cpu);
+}
+
+#ifdef CONFIG_OF
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+					   struct cacheinfo *sib_leaf)
+{
+	return sib_leaf->fw_token == this_leaf->fw_token;
+}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+	const char *size_prop;
+	const char *line_size_props[2];
+	const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+	{
+		.size_prop       = "cache-size",
+		.line_size_props = { "cache-line-size",
+				     "cache-block-size", },
+		.nr_sets_prop    = "cache-sets",
+	}, {
+		.size_prop       = "i-cache-size",
+		.line_size_props = { "i-cache-line-size",
+				     "i-cache-block-size", },
+		.nr_sets_prop    = "i-cache-sets",
+	}, {
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+	if (type == CACHE_TYPE_UNIFIED)
+		return 0;
+	return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
+{
+	const char *propname;
+	int ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	propname = cache_type_info[ct_idx].size_prop;
+
+	if (of_property_read_u32(np, propname, &this_leaf->size))
+		this_leaf->size = 0;
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+				struct device_node *np)
+{
+	int i, lim, ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+	for (i = 0; i < lim; i++) {
+		int ret;
+		u32 line_size;
+		const char *propname;
+
+		propname = cache_type_info[ct_idx].line_size_props[i];
+		ret = of_property_read_u32(np, propname, &line_size);
+		if (!ret) {
+			this_leaf->coherency_line_size = line_size;
+			break;
+		}
+	}
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
+{
+	const char *propname;
+	int ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	propname = cache_type_info[ct_idx].nr_sets_prop;
+
+	if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
+		this_leaf->number_of_sets = 0;
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+	unsigned int line_size = this_leaf->coherency_line_size;
+	unsigned int nr_sets = this_leaf->number_of_sets;
+	unsigned int size = this_leaf->size;
+
+	/*
+	 * If the cache is fully associative, there is no need to
+	 * check the other properties.
+	 */
+	if (nr_sets > 1 && size > 0 && line_size > 0)
+		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
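+
+/*
+ * Worked example: a 32 KiB cache with 128 sets and 64-byte lines yields
+ * (32768 / 128) / 64 = 4 ways of associativity.
+ */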
+
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+				  struct device_node *np)
+{
+	return of_property_read_bool(np, "cache-unified");
+}
+
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+			       struct device_node *np)
+{
+	/*
+	 * init_cache_level must setup the cache level correctly
+	 * overriding the architecturally specified levels, so
+	 * if type is NONE at this stage, it should be unified
+	 */
+	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+	    cache_node_is_unified(this_leaf, np))
+		this_leaf->type = CACHE_TYPE_UNIFIED;
+	cache_size(this_leaf, np);
+	cache_get_line_size(this_leaf, np);
+	cache_nr_sets(this_leaf, np);
+	cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+	struct device_node *np;
+	struct cacheinfo *this_leaf;
+	struct device *cpu_dev = get_cpu_device(cpu);
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	unsigned int index = 0;
+
+	/* skip if fw_token is already populated */
+	if (this_cpu_ci->info_list->fw_token)
+		return 0;
+
+	if (!cpu_dev) {
+		pr_err("No cpu device for CPU %d\n", cpu);
+		return -ENODEV;
+	}
+	np = cpu_dev->of_node;
+	if (!np) {
+		pr_err("Failed to find cpu%d device node\n", cpu);
+		return -ENOENT;
+	}
+
+	while (index < cache_leaves(cpu)) {
+		this_leaf = this_cpu_ci->info_list + index;
+		if (this_leaf->level != 1)
+			np = of_find_next_cache_node(np);
+		else
+			np = of_node_get(np);/* cpu node itself */
+		if (!np)
+			break;
+		cache_of_set_props(this_leaf, np);
+		this_leaf->fw_token = np;
+		index++;
+	}
+
+	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+		return -ENOENT;
+
+	return 0;
+}
+#else
+static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+					   struct cacheinfo *sib_leaf)
+{
+	/*
+	 * For non-DT/ACPI systems, assume unique level 1 caches and
+	 * system-wide shared caches for all other levels.  This will be
+	 * used only if arch-specific code has not populated shared_cpu_map.
+	 */
+	return this_leaf->level != 1;
+}
+#endif
+
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+	return -ENOTSUPP;
+}
+
+static int cache_shared_cpu_map_setup(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf, *sib_leaf;
+	unsigned int index;
+	int ret = 0;
+
+	if (this_cpu_ci->cpu_map_populated)
+		return 0;
+
+	if (of_have_populated_dt())
+		ret = cache_setup_of_node(cpu);
+	else if (!acpi_disabled)
+		ret = cache_setup_acpi(cpu);
+
+	if (ret)
+		return ret;
+
+	for (index = 0; index < cache_leaves(cpu); index++) {
+		unsigned int i;
+
+		this_leaf = this_cpu_ci->info_list + index;
+		/* skip if shared_cpu_map is already populated */
+		if (!cpumask_empty(&this_leaf->shared_cpu_map))
+			continue;
+
+		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+		for_each_online_cpu(i) {
+			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+			if (i == cpu || !sib_cpu_ci->info_list)
+				continue;/* skip if itself or no cacheinfo */
+			sib_leaf = sib_cpu_ci->info_list + index;
+			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void cache_shared_cpu_map_remove(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf, *sib_leaf;
+	unsigned int sibling, index;
+
+	for (index = 0; index < cache_leaves(cpu); index++) {
+		this_leaf = this_cpu_ci->info_list + index;
+		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
+			struct cpu_cacheinfo *sib_cpu_ci;
+
+			if (sibling == cpu) /* skip itself */
+				continue;
+
+			sib_cpu_ci = get_cpu_cacheinfo(sibling);
+			if (!sib_cpu_ci->info_list)
+				continue;
+
+			sib_leaf = sib_cpu_ci->info_list + index;
+			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+		}
+		if (of_have_populated_dt())
+			of_node_put(this_leaf->fw_token);
+	}
+}
+
+static void free_cache_attributes(unsigned int cpu)
+{
+	if (!per_cpu_cacheinfo(cpu))
+		return;
+
+	cache_shared_cpu_map_remove(cpu);
+
+	kfree(per_cpu_cacheinfo(cpu));
+	per_cpu_cacheinfo(cpu) = NULL;
+}
+
+int __weak init_cache_level(unsigned int cpu)
+{
+	return -ENOENT;
+}
+
+int __weak populate_cache_leaves(unsigned int cpu)
+{
+	return -ENOENT;
+}
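+
+/*
+ * Example (illustration only): how an architecture might override the two
+ * weak hooks above for a fixed L1I/L1D + unified L2 layout.  The counts and
+ * types below are placeholders, not real hardware data.
+ */
+#if 0
+int init_cache_level(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+	this_cpu_ci->num_levels = 2;	/* L1 + L2 */
+	this_cpu_ci->num_leaves = 3;	/* L1I, L1D, unified L2 */
+	return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+	struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
+
+	leaf[0].level = 1; leaf[0].type = CACHE_TYPE_INST;
+	leaf[1].level = 1; leaf[1].type = CACHE_TYPE_DATA;
+	leaf[2].level = 2; leaf[2].type = CACHE_TYPE_UNIFIED;
+	return 0;
+}
+#endif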
+
+static int detect_cache_attributes(unsigned int cpu)
+{
+	int ret;
+
+	if (init_cache_level(cpu) || !cache_leaves(cpu))
+		return -ENOENT;
+
+	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+					 sizeof(struct cacheinfo), GFP_KERNEL);
+	if (per_cpu_cacheinfo(cpu) == NULL)
+		return -ENOMEM;
+
+	/*
+	 * populate_cache_leaves() may completely setup the cache leaves and
+	 * shared_cpu_map or it may leave it partially setup.
+	 */
+	ret = populate_cache_leaves(cpu);
+	if (ret)
+		goto free_ci;
+	/*
+	 * For systems using DT for cache hierarchy, fw_token
+	 * and shared_cpu_map will be set up here only if they are
+	 * not populated already
+	 */
+	ret = cache_shared_cpu_map_setup(cpu);
+	if (ret) {
+		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
+		goto free_ci;
+	}
+
+	return 0;
+
+free_ci:
+	free_cache_attributes(cpu);
+	return ret;
+}
+
+/* pointer to cpuX/cache device */
+static DEFINE_PER_CPU(struct device *, ci_cache_dev);
+#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))
+
+static cpumask_t cache_dev_map;
+
+/* pointer to array of devices for cpuX/cache/indexY */
+static DEFINE_PER_CPU(struct device **, ci_index_dev);
+#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
+#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
+
+#define show_one(file_name, object)				\
+static ssize_t file_name##_show(struct device *dev,		\
+		struct device_attribute *attr, char *buf)	\
+{								\
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
+	return sprintf(buf, "%u\n", this_leaf->object);		\
+}
+
+show_one(id, id);
+show_one(level, level);
+show_one(coherency_line_size, coherency_line_size);
+show_one(number_of_sets, number_of_sets);
+show_one(physical_line_partition, physical_line_partition);
+show_one(ways_of_associativity, ways_of_associativity);
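+
+/*
+ * For reference, show_one(level, level) above expands to:
+ *
+ *	static ssize_t level_show(struct device *dev,
+ *			struct device_attribute *attr, char *buf)
+ *	{
+ *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ *		return sprintf(buf, "%u\n", this_leaf->level);
+ *	}
+ */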
+
+static ssize_t size_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
+}
+
+static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+	return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return shared_cpumap_show_func(dev, false, buf);
+}
+
+static ssize_t shared_cpu_list_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return shared_cpumap_show_func(dev, true, buf);
+}
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+	switch (this_leaf->type) {
+	case CACHE_TYPE_DATA:
+		return sprintf(buf, "Data\n");
+	case CACHE_TYPE_INST:
+		return sprintf(buf, "Instruction\n");
+	case CACHE_TYPE_UNIFIED:
+		return sprintf(buf, "Unified\n");
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t allocation_policy_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	unsigned int ci_attr = this_leaf->attributes;
+	int n = 0;
+
+	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
+		n = sprintf(buf, "ReadWriteAllocate\n");
+	else if (ci_attr & CACHE_READ_ALLOCATE)
+		n = sprintf(buf, "ReadAllocate\n");
+	else if (ci_attr & CACHE_WRITE_ALLOCATE)
+		n = sprintf(buf, "WriteAllocate\n");
+	return n;
+}
+
+static ssize_t write_policy_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	unsigned int ci_attr = this_leaf->attributes;
+	int n = 0;
+
+	if (ci_attr & CACHE_WRITE_THROUGH)
+		n = sprintf(buf, "WriteThrough\n");
+	else if (ci_attr & CACHE_WRITE_BACK)
+		n = sprintf(buf, "WriteBack\n");
+	return n;
+}
+
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(level);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(coherency_line_size);
+static DEVICE_ATTR_RO(ways_of_associativity);
+static DEVICE_ATTR_RO(number_of_sets);
+static DEVICE_ATTR_RO(size);
+static DEVICE_ATTR_RO(allocation_policy);
+static DEVICE_ATTR_RO(write_policy);
+static DEVICE_ATTR_RO(shared_cpu_map);
+static DEVICE_ATTR_RO(shared_cpu_list);
+static DEVICE_ATTR_RO(physical_line_partition);
+
+static struct attribute *cache_default_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_type.attr,
+	&dev_attr_level.attr,
+	&dev_attr_shared_cpu_map.attr,
+	&dev_attr_shared_cpu_list.attr,
+	&dev_attr_coherency_line_size.attr,
+	&dev_attr_ways_of_associativity.attr,
+	&dev_attr_number_of_sets.attr,
+	&dev_attr_size.attr,
+	&dev_attr_allocation_policy.attr,
+	&dev_attr_write_policy.attr,
+	&dev_attr_physical_line_partition.attr,
+	NULL
+};
+
+static umode_t
+cache_default_attrs_is_visible(struct kobject *kobj,
+			       struct attribute *attr, int unused)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+	const struct cpumask *mask = &this_leaf->shared_cpu_map;
+	umode_t mode = attr->mode;
+
+	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+		return mode;
+	if ((attr == &dev_attr_type.attr) && this_leaf->type)
+		return mode;
+	if ((attr == &dev_attr_level.attr) && this_leaf->level)
+		return mode;
+	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
+		return mode;
+	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
+		return mode;
+	if ((attr == &dev_attr_coherency_line_size.attr) &&
+	    this_leaf->coherency_line_size)
+		return mode;
+	if ((attr == &dev_attr_ways_of_associativity.attr) &&
+	    this_leaf->size) /* allow 0 = full associativity */
+		return mode;
+	if ((attr == &dev_attr_number_of_sets.attr) &&
+	    this_leaf->number_of_sets)
+		return mode;
+	if ((attr == &dev_attr_size.attr) && this_leaf->size)
+		return mode;
+	if ((attr == &dev_attr_write_policy.attr) &&
+	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
+		return mode;
+	if ((attr == &dev_attr_allocation_policy.attr) &&
+	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
+		return mode;
+	if ((attr == &dev_attr_physical_line_partition.attr) &&
+	    this_leaf->physical_line_partition)
+		return mode;
+
+	return 0;
+}
+
+static const struct attribute_group cache_default_group = {
+	.attrs = cache_default_attrs,
+	.is_visible = cache_default_attrs_is_visible,
+};
+
+static const struct attribute_group *cache_default_groups[] = {
+	&cache_default_group,
+	NULL,
+};
+
+static const struct attribute_group *cache_private_groups[] = {
+	&cache_default_group,
+	NULL, /* Place holder for private group */
+	NULL,
+};
+
+const struct attribute_group *
+__weak cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+	return NULL;
+}
+
+static const struct attribute_group **
+cache_get_attribute_groups(struct cacheinfo *this_leaf)
+{
+	const struct attribute_group *priv_group =
+			cache_get_priv_group(this_leaf);
+
+	if (!priv_group)
+		return cache_default_groups;
+
+	if (!cache_private_groups[1])
+		cache_private_groups[1] = priv_group;
+
+	return cache_private_groups;
+}
+
+/* Add/Remove cache interface for CPU device */
+static void cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	int i;
+	struct device *ci_dev;
+
+	if (per_cpu_index_dev(cpu)) {
+		for (i = 0; i < cache_leaves(cpu); i++) {
+			ci_dev = per_cache_index_dev(cpu, i);
+			if (!ci_dev)
+				continue;
+			device_unregister(ci_dev);
+		}
+		kfree(per_cpu_index_dev(cpu));
+		per_cpu_index_dev(cpu) = NULL;
+	}
+	device_unregister(per_cpu_cache_dev(cpu));
+	per_cpu_cache_dev(cpu) = NULL;
+}
+
+static int cpu_cache_sysfs_init(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	if (per_cpu_cacheinfo(cpu) == NULL)
+		return -ENOENT;
+
+	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
+	if (IS_ERR(per_cpu_cache_dev(cpu)))
+		return PTR_ERR(per_cpu_cache_dev(cpu));
+
+	/* Allocate all required memory */
+	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
+					 sizeof(struct device *), GFP_KERNEL);
+	if (unlikely(per_cpu_index_dev(cpu) == NULL))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	cpu_cache_sysfs_exit(cpu);
+	return -ENOMEM;
+}
+
+static int cache_add_dev(unsigned int cpu)
+{
+	unsigned int i;
+	int rc;
+	struct device *ci_dev, *parent;
+	struct cacheinfo *this_leaf;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	const struct attribute_group **cache_groups;
+
+	rc = cpu_cache_sysfs_init(cpu);
+	if (unlikely(rc < 0))
+		return rc;
+
+	parent = per_cpu_cache_dev(cpu);
+	for (i = 0; i < cache_leaves(cpu); i++) {
+		this_leaf = this_cpu_ci->info_list + i;
+		if (this_leaf->disable_sysfs)
+			continue;
+		cache_groups = cache_get_attribute_groups(this_leaf);
+		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
+					   "index%1u", i);
+		if (IS_ERR(ci_dev)) {
+			rc = PTR_ERR(ci_dev);
+			goto err;
+		}
+		per_cache_index_dev(cpu, i) = ci_dev;
+	}
+	cpumask_set_cpu(cpu, &cache_dev_map);
+
+	return 0;
+err:
+	cpu_cache_sysfs_exit(cpu);
+	return rc;
+}
+
+static int cacheinfo_cpu_online(unsigned int cpu)
+{
+	int rc = detect_cache_attributes(cpu);
+
+	if (rc)
+		return rc;
+	rc = cache_add_dev(cpu);
+	if (rc)
+		free_cache_attributes(cpu);
+	return rc;
+}
+
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
+{
+	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+		cpu_cache_sysfs_exit(cpu);
+
+	free_cache_attributes(cpu);
+	return 0;
+}
+
+static int __init cacheinfo_sysfs_init(void)
+{
+	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+}
+device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/class.c b/drivers/base/class.c
new file mode 100644
index 0000000..54def4e
--- /dev/null
+++ b/drivers/base/class.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * class.c - basic device class management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman
+ * Copyright (c) 2003-2004 IBM Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include "base.h"
+
+#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
+
+static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
+			       char *buf)
+{
+	struct class_attribute *class_attr = to_class_attr(attr);
+	struct subsys_private *cp = to_subsys_private(kobj);
+	ssize_t ret = -EIO;
+
+	if (class_attr->show)
+		ret = class_attr->show(cp->class, class_attr, buf);
+	return ret;
+}
+
+static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct class_attribute *class_attr = to_class_attr(attr);
+	struct subsys_private *cp = to_subsys_private(kobj);
+	ssize_t ret = -EIO;
+
+	if (class_attr->store)
+		ret = class_attr->store(cp->class, class_attr, buf, count);
+	return ret;
+}
+
+static void class_release(struct kobject *kobj)
+{
+	struct subsys_private *cp = to_subsys_private(kobj);
+	struct class *class = cp->class;
+
+	pr_debug("class '%s': release.\n", class->name);
+
+	if (class->class_release)
+		class->class_release(class);
+	else
+		pr_debug("class '%s' does not have a release() function, be careful\n",
+			 class->name);
+
+	kfree(cp);
+}
+
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+	struct subsys_private *cp = to_subsys_private(kobj);
+	struct class *class = cp->class;
+
+	return class->ns_type;
+}
+
+static const struct sysfs_ops class_sysfs_ops = {
+	.show	   = class_attr_show,
+	.store	   = class_attr_store,
+};
+
+static struct kobj_type class_ktype = {
+	.sysfs_ops	= &class_sysfs_ops,
+	.release	= class_release,
+	.child_ns_type	= class_child_ns_type,
+};
+
+/* Hotplug events for classes go to the class subsys */
+static struct kset *class_kset;
+
+
+int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+			 const void *ns)
+{
+	int error;
+
+	if (cls)
+		error = sysfs_create_file_ns(&cls->p->subsys.kobj,
+					     &attr->attr, ns);
+	else
+		error = -EINVAL;
+	return error;
+}
+
+void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+			  const void *ns)
+{
+	if (cls)
+		sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
+}
+
+static struct class *class_get(struct class *cls)
+{
+	if (cls)
+		kset_get(&cls->p->subsys);
+	return cls;
+}
+
+static void class_put(struct class *cls)
+{
+	if (cls)
+		kset_put(&cls->p->subsys);
+}
+
+static void klist_class_dev_get(struct klist_node *n)
+{
+	struct device *dev = container_of(n, struct device, knode_class);
+
+	get_device(dev);
+}
+
+static void klist_class_dev_put(struct klist_node *n)
+{
+	struct device *dev = container_of(n, struct device, knode_class);
+
+	put_device(dev);
+}
+
+static int class_add_groups(struct class *cls,
+			    const struct attribute_group **groups)
+{
+	return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+				const struct attribute_group **groups)
+{
+	return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
+int __class_register(struct class *cls, struct lock_class_key *key)
+{
+	struct subsys_private *cp;
+	int error;
+
+	pr_debug("device class '%s': registering\n", cls->name);
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+	klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
+	INIT_LIST_HEAD(&cp->interfaces);
+	kset_init(&cp->glue_dirs);
+	__mutex_init(&cp->mutex, "subsys mutex", key);
+	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
+	if (error) {
+		kfree(cp);
+		return error;
+	}
+
+	/* set the default /sys/dev directory for devices of this class */
+	if (!cls->dev_kobj)
+		cls->dev_kobj = sysfs_dev_char_kobj;
+
+#if defined(CONFIG_BLOCK)
+	/* let the block class directory show up in the root of sysfs */
+	if (!sysfs_deprecated || cls != &block_class)
+		cp->subsys.kobj.kset = class_kset;
+#else
+	cp->subsys.kobj.kset = class_kset;
+#endif
+	cp->subsys.kobj.ktype = &class_ktype;
+	cp->class = cls;
+	cls->p = cp;
+
+	error = kset_register(&cp->subsys);
+	if (error) {
+		kfree(cp);
+		return error;
+	}
+	error = class_add_groups(class_get(cls), cls->class_groups);
+	class_put(cls);
+	return error;
+}
+EXPORT_SYMBOL_GPL(__class_register);
+
+void class_unregister(struct class *cls)
+{
+	pr_debug("device class '%s': unregistering\n", cls->name);
+	class_remove_groups(cls, cls->class_groups);
+	kset_unregister(&cls->p->subsys);
+}
+
+static void class_create_release(struct class *cls)
+{
+	pr_debug("%s called for %s\n", __func__, cls->name);
+	kfree(cls);
+}
+
+/**
+ * class_create - create a struct class structure
+ * @owner: pointer to the module that is to "own" this struct class
+ * @name: pointer to a string for the name of this class.
+ * @key: the lock_class_key for this class; used by mutex lock debugging
+ *
+ * This is used to create a struct class pointer that can then be used
+ * in calls to device_create().
+ *
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
+ * Note, the pointer created here is to be destroyed when finished by
+ * making a call to class_destroy().
+ */
+struct class *__class_create(struct module *owner, const char *name,
+			     struct lock_class_key *key)
+{
+	struct class *cls;
+	int retval;
+
+	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+	if (!cls) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	cls->name = name;
+	cls->owner = owner;
+	cls->class_release = class_create_release;
+
+	retval = __class_register(cls, key);
+	if (retval)
+		goto error;
+
+	return cls;
+
+error:
+	kfree(cls);
+	return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(__class_create);
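+
+/*
+ * Example (illustration only): the usual lifetime of a class made through
+ * the class_create() wrapper macro (which supplies the lock_class_key).
+ * The foo names are assumptions.
+ */
+#if 0
+static struct class *foo_class;
+
+static int __init foo_init(void)
+{
+	foo_class = class_create(THIS_MODULE, "foo");
+	if (IS_ERR(foo_class))
+		return PTR_ERR(foo_class);
+	return 0;
+}
+
+static void __exit foo_exit(void)
+{
+	class_destroy(foo_class);
+}
+#endif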
+
+/**
+ * class_destroy - destroys a struct class structure
+ * @cls: pointer to the struct class that is to be destroyed
+ *
+ * Note, the pointer to be destroyed must have been created with a call
+ * to class_create().
+ */
+void class_destroy(struct class *cls)
+{
+	if ((cls == NULL) || (IS_ERR(cls)))
+		return;
+
+	class_unregister(cls);
+}
+
+/**
+ * class_dev_iter_init - initialize class device iterator
+ * @iter: class iterator to initialize
+ * @class: the class we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize class iterator @iter such that it iterates over devices
+ * of @class.  If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
+			 struct device *start, const struct device_type *type)
+{
+	struct klist_node *start_knode = NULL;
+
+	if (start)
+		start_knode = &start->knode_class;
+	klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
+	iter->type = type;
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_init);
+
+/**
+ * class_dev_iter_next - iterate to the next device
+ * @iter: class iterator to proceed
+ *
+ * Proceed @iter to the next device and return it.  Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator proceeds to the next device or is exited.  The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into class code.
+ */
+struct device *class_dev_iter_next(struct class_dev_iter *iter)
+{
+	struct klist_node *knode;
+	struct device *dev;
+
+	while (1) {
+		knode = klist_next(&iter->ki);
+		if (!knode)
+			return NULL;
+		dev = container_of(knode, struct device, knode_class);
+		if (!iter->type || iter->type == dev->type)
+			return dev;
+	}
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_next);
+
+/**
+ * class_dev_iter_exit - finish iteration
+ * @iter: class iterator to finish
+ *
+ * Finish an iteration.  Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void class_dev_iter_exit(struct class_dev_iter *iter)
+{
+	klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_exit);
+
+/**
+ * class_for_each_device - device iterator
+ * @class: the class we're iterating
+ * @start: the device to start with in the list, if any.
+ * @data: data for the callback
+ * @fn: function to be called for each device
+ *
+ * Iterate over @class's list of devices, and call @fn for each,
+ * passing it @data.  If @start is set, the list iteration will start
+ * there, otherwise if it is NULL, the iteration starts at the
+ * beginning of the list.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ *
+ * @fn is allowed to do anything including calling back into class
+ * code.  There's no locking restriction.
+ */
+int class_for_each_device(struct class *class, struct device *start,
+			  void *data, int (*fn)(struct device *, void *))
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	int error = 0;
+
+	if (!class)
+		return -EINVAL;
+	if (!class->p) {
+		WARN(1, "%s called for class '%s' before it was initialized",
+		     __func__, class->name);
+		return -EINVAL;
+	}
+
+	class_dev_iter_init(&iter, class, start, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		error = fn(dev, data);
+		if (error)
+			break;
+	}
+	class_dev_iter_exit(&iter);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(class_for_each_device);
+
+/**
+ * class_find_device - device iterator for locating a particular device
+ * @class: the class we're iterating
+ * @start: Device to begin with
+ * @data: data for the match function
+ * @match: function to check device
+ *
+ * This is similar to the class_for_each_device() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does.  If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ *
+ * Note, you will need to drop the reference with put_device() after use.
+ *
+ * @match is allowed to do anything including calling back into class
+ * code.  There's no locking restriction.
+ */
+struct device *class_find_device(struct class *class, struct device *start,
+				 const void *data,
+				 int (*match)(struct device *, const void *))
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+
+	if (!class)
+		return NULL;
+	if (!class->p) {
+		WARN(1, "%s called for class '%s' before it was initialized",
+		     __func__, class->name);
+		return NULL;
+	}
+
+	class_dev_iter_init(&iter, class, start, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		if (match(dev, data)) {
+			get_device(dev);
+			break;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(class_find_device);
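+
+/*
+ * Example (illustration only): a name-based match callback for
+ * class_find_device().  A non-zero return stops the iteration and the
+ * matched device is returned referenced, so the caller must drop it with
+ * put_device().  The foo names are assumptions.
+ */
+#if 0
+static int foo_match_name(struct device *dev, const void *data)
+{
+	return sysfs_streq(dev_name(dev), data);
+}
+
+static struct device *foo_find_by_name(const char *name)
+{
+	/* foo_class is an assumed, already-registered class */
+	return class_find_device(foo_class, NULL, name, foo_match_name);
+}
+#endif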
+
+int class_interface_register(struct class_interface *class_intf)
+{
+	struct class *parent;
+	struct class_dev_iter iter;
+	struct device *dev;
+
+	if (!class_intf || !class_intf->class)
+		return -ENODEV;
+
+	parent = class_get(class_intf->class);
+	if (!parent)
+		return -EINVAL;
+
+	mutex_lock(&parent->p->mutex);
+	list_add_tail(&class_intf->node, &parent->p->interfaces);
+	if (class_intf->add_dev) {
+		class_dev_iter_init(&iter, parent, NULL, NULL);
+		while ((dev = class_dev_iter_next(&iter)))
+			class_intf->add_dev(dev, class_intf);
+		class_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&parent->p->mutex);
+
+	return 0;
+}
+
+void class_interface_unregister(struct class_interface *class_intf)
+{
+	struct class *parent = class_intf->class;
+	struct class_dev_iter iter;
+	struct device *dev;
+
+	if (!parent)
+		return;
+
+	mutex_lock(&parent->p->mutex);
+	list_del_init(&class_intf->node);
+	if (class_intf->remove_dev) {
+		class_dev_iter_init(&iter, parent, NULL, NULL);
+		while ((dev = class_dev_iter_next(&iter)))
+			class_intf->remove_dev(dev, class_intf);
+		class_dev_iter_exit(&iter);
+	}
+	mutex_unlock(&parent->p->mutex);
+
+	class_put(parent);
+}
+
+ssize_t show_class_attr_string(struct class *class,
+			       struct class_attribute *attr, char *buf)
+{
+	struct class_attribute_string *cs;
+
+	cs = container_of(attr, struct class_attribute_string, attr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
+}
+EXPORT_SYMBOL_GPL(show_class_attr_string);
+
+struct class_compat {
+	struct kobject *kobj;
+};
+
+/**
+ * class_compat_register - register a compatibility class
+ * @name: the name of the class
+ *
+ * Compatibility classes are meant as a temporary user-space compatibility
+ * workaround when converting a family of class devices to bus devices.
+ */
+struct class_compat *class_compat_register(const char *name)
+{
+	struct class_compat *cls;
+
+	cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
+	if (!cls)
+		return NULL;
+	cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
+	if (!cls->kobj) {
+		kfree(cls);
+		return NULL;
+	}
+	return cls;
+}
+EXPORT_SYMBOL_GPL(class_compat_register);
+
+/**
+ * class_compat_unregister - unregister a compatibility class
+ * @cls: the class to unregister
+ */
+void class_compat_unregister(struct class_compat *cls)
+{
+	kobject_put(cls->kobj);
+	kfree(cls);
+}
+EXPORT_SYMBOL_GPL(class_compat_unregister);
+
+/**
+ * class_compat_create_link - create a compatibility class device link to
+ *			      a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link should be created
+ */
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+			     struct device *device_link)
+{
+	int error;
+
+	error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
+	if (error)
+		return error;
+
+	/*
+	 * Optionally add a "device" link (typically to the parent), as a
+	 * class device would have one and we want to provide as much
+	 * backwards compatibility as possible.
+	 */
+	if (device_link) {
+		error = sysfs_create_link(&dev->kobj, &device_link->kobj,
+					  "device");
+		if (error)
+			sysfs_remove_link(cls->kobj, dev_name(dev));
+	}
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(class_compat_create_link);
+
+/**
+ * class_compat_remove_link - remove a compatibility class device link to
+ *			      a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link was previously
+ * 		 created
+ */
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+			      struct device *device_link)
+{
+	if (device_link)
+		sysfs_remove_link(&dev->kobj, "device");
+	sysfs_remove_link(cls->kobj, dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(class_compat_remove_link);
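+
+/*
+ * Example (illustration only): keeping a legacy /sys/class/foo directory
+ * populated while the devices themselves now live on a bus.  The foo names
+ * are assumptions.
+ */
+#if 0
+static struct class_compat *foo_compat_class;
+
+static int __init foo_compat_init(void)
+{
+	foo_compat_class = class_compat_register("foo");
+	return foo_compat_class ? 0 : -ENOMEM;
+}
+
+static int foo_device_added(struct device *dev)
+{
+	/* mirror the bus device under /sys/class/foo, plus a "device" link */
+	return class_compat_create_link(foo_compat_class, dev, dev->parent);
+}
+
+static void foo_device_removed(struct device *dev)
+{
+	class_compat_remove_link(foo_compat_class, dev, dev->parent);
+}
+#endif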
+
+int __init classes_init(void)
+{
+	class_kset = kset_create_and_add("class", NULL, NULL);
+	if (!class_kset)
+		return -ENOMEM;
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(class_create_file_ns);
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
+EXPORT_SYMBOL_GPL(class_unregister);
+EXPORT_SYMBOL_GPL(class_destroy);
+
+EXPORT_SYMBOL_GPL(class_interface_register);
+EXPORT_SYMBOL_GPL(class_interface_unregister);
diff --git a/drivers/base/component.c b/drivers/base/component.c
new file mode 100644
index 0000000..8946dfe
--- /dev/null
+++ b/drivers/base/component.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Componentized device handling.
+ *
+ * This is work in progress.  We gather up the component devices into a list,
+ * and bind them when instructed.  At the moment, we're specific to the DRM
+ * subsystem, and only handle one master device, but this doesn't have to be
+ * the case.
+ */
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+struct component;
+
+struct component_match_array {
+	void *data;
+	int (*compare)(struct device *, void *);
+	void (*release)(struct device *, void *);
+	struct component *component;
+	bool duplicate;
+};
+
+struct component_match {
+	size_t alloc;
+	size_t num;
+	struct component_match_array *compare;
+};
+
+struct master {
+	struct list_head node;
+	bool bound;
+
+	const struct component_master_ops *ops;
+	struct device *dev;
+	struct component_match *match;
+	struct dentry *dentry;
+};
+
+struct component {
+	struct list_head node;
+	struct master *master;
+	bool bound;
+
+	const struct component_ops *ops;
+	struct device *dev;
+};
+
+static DEFINE_MUTEX(component_mutex);
+static LIST_HEAD(component_list);
+static LIST_HEAD(masters);
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *component_debugfs_dir;
+
+static int component_devices_show(struct seq_file *s, void *data)
+{
+	struct master *m = s->private;
+	struct component_match *match = m->match;
+	size_t i;
+
+	mutex_lock(&component_mutex);
+	seq_printf(s, "%-40s %20s\n", "master name", "status");
+	seq_puts(s, "-------------------------------------------------------------\n");
+	seq_printf(s, "%-40s %20s\n\n",
+		   dev_name(m->dev), m->bound ? "bound" : "not bound");
+
+	seq_printf(s, "%-40s %20s\n", "device name", "status");
+	seq_puts(s, "-------------------------------------------------------------\n");
+	for (i = 0; i < match->num; i++) {
+		struct device *d = (struct device *)match->compare[i].data;
+
+		seq_printf(s, "%-40s %20s\n", dev_name(d),
+			   match->compare[i].component ?
+			   "registered" : "not registered");
+	}
+	mutex_unlock(&component_mutex);
+
+	return 0;
+}
+
+static int component_devices_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, component_devices_show, inode->i_private);
+}
+
+static const struct file_operations component_devices_fops = {
+	.open = component_devices_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init component_debug_init(void)
+{
+	component_debugfs_dir = debugfs_create_dir("device_component", NULL);
+
+	return 0;
+}
+
+core_initcall(component_debug_init);
+
+static void component_master_debugfs_add(struct master *m)
+{
+	m->dentry = debugfs_create_file(dev_name(m->dev), 0444,
+					component_debugfs_dir,
+					m, &component_devices_fops);
+}
+
+static void component_master_debugfs_del(struct master *m)
+{
+	debugfs_remove(m->dentry);
+	m->dentry = NULL;
+}
+
+#else
+
+static void component_master_debugfs_add(struct master *m)
+{ }
+
+static void component_master_debugfs_del(struct master *m)
+{ }
+
+#endif
+
+static struct master *__master_find(struct device *dev,
+	const struct component_master_ops *ops)
+{
+	struct master *m;
+
+	list_for_each_entry(m, &masters, node)
+		if (m->dev == dev && (!ops || m->ops == ops))
+			return m;
+
+	return NULL;
+}
+
+static struct component *find_component(struct master *master,
+	int (*compare)(struct device *, void *), void *compare_data)
+{
+	struct component *c;
+
+	list_for_each_entry(c, &component_list, node) {
+		if (c->master && c->master != master)
+			continue;
+
+		if (compare(c->dev, compare_data))
+			return c;
+	}
+
+	return NULL;
+}
+
+static int find_components(struct master *master)
+{
+	struct component_match *match = master->match;
+	size_t i;
+	int ret = 0;
+
+	/*
+	 * Scan the array of match functions and attach
+	 * any components which are found to this master.
+	 */
+	for (i = 0; i < match->num; i++) {
+		struct component_match_array *mc = &match->compare[i];
+		struct component *c;
+
+		dev_dbg(master->dev, "Looking for component %zu\n", i);
+
+		if (match->compare[i].component)
+			continue;
+
+		c = find_component(master, mc->compare, mc->data);
+		if (!c) {
+			ret = -ENXIO;
+			break;
+		}
+
+		dev_dbg(master->dev, "found component %s, duplicate %u\n",
+			dev_name(c->dev), !!c->master);
+
+		/* Attach this component to the master */
+		match->compare[i].duplicate = !!c->master;
+		match->compare[i].component = c;
+		c->master = master;
+	}
+	return ret;
+}
+
+/* Detach component from associated master */
+static void remove_component(struct master *master, struct component *c)
+{
+	size_t i;
+
+	/* Detach the component from this master. */
+	for (i = 0; i < master->match->num; i++)
+		if (master->match->compare[i].component == c)
+			master->match->compare[i].component = NULL;
+}
+
+/*
+ * Try to bring up a master.  If component is NULL, we're interested in
+ * this master, otherwise it's a component which must be present to try
+ * and bring up the master.
+ *
+ * Returns 1 for successful bringup, 0 if not ready, or a negative errno.
+ */
+static int try_to_bring_up_master(struct master *master,
+	struct component *component)
+{
+	int ret;
+
+	dev_dbg(master->dev, "trying to bring up master\n");
+
+	if (find_components(master)) {
+		dev_dbg(master->dev, "master has incomplete components\n");
+		return 0;
+	}
+
+	if (component && component->master != master) {
+		dev_dbg(master->dev, "master is not for this component (%s)\n",
+			dev_name(component->dev));
+		return 0;
+	}
+
+	if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	/* Found all components */
+	ret = master->ops->bind(master->dev);
+	if (ret < 0) {
+		devres_release_group(master->dev, NULL);
+		dev_info(master->dev, "master bind failed: %d\n", ret);
+		return ret;
+	}
+
+	master->bound = true;
+	return 1;
+}
+
+static int try_to_bring_up_masters(struct component *component)
+{
+	struct master *m;
+	int ret = 0;
+
+	list_for_each_entry(m, &masters, node) {
+		if (!m->bound) {
+			ret = try_to_bring_up_master(m, component);
+			if (ret != 0)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static void take_down_master(struct master *master)
+{
+	if (master->bound) {
+		master->ops->unbind(master->dev);
+		devres_release_group(master->dev, NULL);
+		master->bound = false;
+	}
+}
+
+static void component_match_release(struct device *master,
+	struct component_match *match)
+{
+	unsigned int i;
+
+	for (i = 0; i < match->num; i++) {
+		struct component_match_array *mc = &match->compare[i];
+
+		if (mc->release)
+			mc->release(master, mc->data);
+	}
+
+	kfree(match->compare);
+}
+
+static void devm_component_match_release(struct device *dev, void *res)
+{
+	component_match_release(dev, res);
+}
+
+static int component_match_realloc(struct device *dev,
+	struct component_match *match, size_t num)
+{
+	struct component_match_array *new;
+
+	if (match->alloc == num)
+		return 0;
+
+	new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
+	if (match->compare) {
+		memcpy(new, match->compare, sizeof(*new) *
+					    min(match->num, num));
+		kfree(match->compare);
+	}
+	match->compare = new;
+	match->alloc = num;
+
+	return 0;
+}
+
+/*
+ * Add a component to be matched, with a release function.
+ *
+ * The match array is first created or extended if necessary.
+ */
+void component_match_add_release(struct device *master,
+	struct component_match **matchptr,
+	void (*release)(struct device *, void *),
+	int (*compare)(struct device *, void *), void *compare_data)
+{
+	struct component_match *match = *matchptr;
+
+	if (IS_ERR(match))
+		return;
+
+	if (!match) {
+		match = devres_alloc(devm_component_match_release,
+				     sizeof(*match), GFP_KERNEL);
+		if (!match) {
+			*matchptr = ERR_PTR(-ENOMEM);
+			return;
+		}
+
+		devres_add(master, match);
+
+		*matchptr = match;
+	}
+
+	if (match->num == match->alloc) {
+		size_t new_size = match->alloc + 16;
+		int ret;
+
+		ret = component_match_realloc(master, match, new_size);
+		if (ret) {
+			*matchptr = ERR_PTR(ret);
+			return;
+		}
+	}
+
+	match->compare[match->num].compare = compare;
+	match->compare[match->num].release = release;
+	match->compare[match->num].data = compare_data;
+	match->compare[match->num].component = NULL;
+	match->num++;
+}
+EXPORT_SYMBOL(component_match_add_release);
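+
+/*
+ * Example (illustration only): building a match list from a master's
+ * probe().  component_match_add() is the common wrapper that passes a NULL
+ * release function.  Matching children by of_node, the platform-device
+ * shape, and foo_master_ops (sketched after component_bind_all() below)
+ * are all assumptions; child-node refcounting is elided.
+ */
+#if 0
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int foo_master_probe(struct platform_device *pdev)
+{
+	struct component_match *match = NULL;
+	struct device_node *child_np;
+
+	/* one match entry per child node under the master's DT node */
+	for_each_available_child_of_node(pdev->dev.of_node, child_np)
+		component_match_add(&pdev->dev, &match, compare_of, child_np);
+
+	if (IS_ERR_OR_NULL(match))
+		return match ? PTR_ERR(match) : -ENODEV;
+
+	return component_master_add_with_match(&pdev->dev, &foo_master_ops,
+					       match);
+}
+#endif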
+
+static void free_master(struct master *master)
+{
+	struct component_match *match = master->match;
+	int i;
+
+	component_master_debugfs_del(master);
+	list_del(&master->node);
+
+	if (match) {
+		for (i = 0; i < match->num; i++) {
+			struct component *c = match->compare[i].component;
+			if (c)
+				c->master = NULL;
+		}
+	}
+
+	kfree(master);
+}
+
+int component_master_add_with_match(struct device *dev,
+	const struct component_master_ops *ops,
+	struct component_match *match)
+{
+	struct master *master;
+	int ret;
+
+	/* Reallocate the match array for its true size */
+	ret = component_match_realloc(dev, match, match->num);
+	if (ret)
+		return ret;
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->dev = dev;
+	master->ops = ops;
+	master->match = match;
+
+	component_master_debugfs_add(master);
+	/* Add to the list of available masters. */
+	mutex_lock(&component_mutex);
+	list_add(&master->node, &masters);
+
+	ret = try_to_bring_up_master(master, NULL);
+
+	if (ret < 0)
+		free_master(master);
+
+	mutex_unlock(&component_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_master_add_with_match);
+
+void component_master_del(struct device *dev,
+	const struct component_master_ops *ops)
+{
+	struct master *master;
+
+	mutex_lock(&component_mutex);
+	master = __master_find(dev, ops);
+	if (master) {
+		take_down_master(master);
+		free_master(master);
+	}
+	mutex_unlock(&component_mutex);
+}
+EXPORT_SYMBOL_GPL(component_master_del);
+
+static void component_unbind(struct component *component,
+	struct master *master, void *data)
+{
+	WARN_ON(!component->bound);
+
+	component->ops->unbind(component->dev, master->dev, data);
+	component->bound = false;
+
+	/* Release all resources claimed in the binding of this component */
+	devres_release_group(component->dev, component);
+}
+
+void component_unbind_all(struct device *master_dev, void *data)
+{
+	struct master *master;
+	struct component *c;
+	size_t i;
+
+	WARN_ON(!mutex_is_locked(&component_mutex));
+
+	master = __master_find(master_dev, NULL);
+	if (!master)
+		return;
+
+	/* Unbind components in reverse order */
+	for (i = master->match->num; i--; )
+		if (!master->match->compare[i].duplicate) {
+			c = master->match->compare[i].component;
+			component_unbind(c, master, data);
+		}
+}
+EXPORT_SYMBOL_GPL(component_unbind_all);
+
+static int component_bind(struct component *component, struct master *master,
+	void *data)
+{
+	int ret;
+
+	/*
+	 * Each component initialises inside its own devres group.
+	 * This allows us to roll back a failed component without
+	 * affecting anything else.
+	 */
+	if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	/*
+	 * Also open a group for the device itself: this allows us
+	 * to release the resources claimed against the sub-device
+	 * at the appropriate moment.
+	 */
+	if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
+		devres_release_group(master->dev, NULL);
+		return -ENOMEM;
+	}
+
+	dev_dbg(master->dev, "binding %s (ops %ps)\n",
+		dev_name(component->dev), component->ops);
+
+	ret = component->ops->bind(component->dev, master->dev, data);
+	if (!ret) {
+		component->bound = true;
+
+		/*
+		 * Close the component device's group so that resources
+		 * allocated in the binding are encapsulated for removal
+		 * at unbind.  Remove the group on the master device as we
+		 * can clean those resources up independently.
+		 */
+		devres_close_group(component->dev, NULL);
+		devres_remove_group(master->dev, NULL);
+
+		dev_info(master->dev, "bound %s (ops %ps)\n",
+			 dev_name(component->dev), component->ops);
+	} else {
+		devres_release_group(component->dev, NULL);
+		devres_release_group(master->dev, NULL);
+
+		dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+			dev_name(component->dev), component->ops, ret);
+	}
+
+	return ret;
+}
+
+int component_bind_all(struct device *master_dev, void *data)
+{
+	struct master *master;
+	struct component *c;
+	size_t i;
+	int ret = 0;
+
+	WARN_ON(!mutex_is_locked(&component_mutex));
+
+	master = __master_find(master_dev, NULL);
+	if (!master)
+		return -EINVAL;
+
+	/* Bind components in match order */
+	for (i = 0; i < master->match->num; i++)
+		if (!master->match->compare[i].duplicate) {
+			c = master->match->compare[i].component;
+			ret = component_bind(c, master, data);
+			if (ret)
+				break;
+		}
+
+	if (ret != 0) {
+		for (; i--; )
+			if (!master->match->compare[i].duplicate) {
+				c = master->match->compare[i].component;
+				component_unbind(c, master, data);
+			}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(component_bind_all);
+
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+	struct component *component;
+	int ret;
+
+	component = kzalloc(sizeof(*component), GFP_KERNEL);
+	if (!component)
+		return -ENOMEM;
+
+	component->ops = ops;
+	component->dev = dev;
+
+	dev_dbg(dev, "adding component (ops %ps)\n", ops);
+
+	mutex_lock(&component_mutex);
+	list_add_tail(&component->node, &component_list);
+
+	ret = try_to_bring_up_masters(component);
+	if (ret < 0) {
+		if (component->master)
+			remove_component(component->master, component);
+		list_del(&component->node);
+
+		kfree(component);
+	}
+	mutex_unlock(&component_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_add);
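+
+/*
+ * Illustrative sketch, not part of the original file: the component side of
+ * the contract.  A child driver publishes itself with component_add() from
+ * its probe routine and withdraws with component_del() on remove; bind() is
+ * where it claims resources against the master.  All example_* names are
+ * hypothetical.
+ */
+static int example_comp_bind(struct device *dev, struct device *master,
+			     void *data)
+{
+	/* Claim per-binding resources with devm_*() against @dev here. */
+	return 0;
+}
+
+static void example_comp_unbind(struct device *dev, struct device *master,
+				void *data)
+{
+	/* Managed resources are released by the framework after this. */
+}
+
+static const struct component_ops example_comp_ops = {
+	.bind	= example_comp_bind,
+	.unbind	= example_comp_unbind,
+};
+
+static int example_comp_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &example_comp_ops);
+}
+
+static int example_comp_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &example_comp_ops);
+	return 0;
+}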
+
+void component_del(struct device *dev, const struct component_ops *ops)
+{
+	struct component *c, *component = NULL;
+
+	mutex_lock(&component_mutex);
+	list_for_each_entry(c, &component_list, node)
+		if (c->dev == dev && c->ops == ops) {
+			list_del(&c->node);
+			component = c;
+			break;
+		}
+
+	if (component && component->master) {
+		take_down_master(component->master);
+		remove_component(component->master, component);
+	}
+
+	mutex_unlock(&component_mutex);
+
+	WARN_ON(!component);
+	kfree(component);
+}
+EXPORT_SYMBOL_GPL(component_del);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/container.c b/drivers/base/container.c
new file mode 100644
index 0000000..1ba42d2
--- /dev/null
+++ b/drivers/base/container.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System bus type for containers.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include <linux/container.h>
+
+#include "base.h"
+
+#define CONTAINER_BUS_NAME	"container"
+
+static int trivial_online(struct device *dev)
+{
+	return 0;
+}
+
+static int container_offline(struct device *dev)
+{
+	struct container_dev *cdev = to_container_dev(dev);
+
+	return cdev->offline ? cdev->offline(cdev) : 0;
+}
+
+struct bus_type container_subsys = {
+	.name = CONTAINER_BUS_NAME,
+	.dev_name = CONTAINER_BUS_NAME,
+	.online = trivial_online,
+	.offline = container_offline,
+};
+
+void __init container_dev_init(void)
+{
+	int ret;
+
+	ret = subsys_system_register(&container_subsys, NULL);
+	if (ret)
+		pr_err("%s() failed: %d\n", __func__, ret);
+}
diff --git a/drivers/base/core.c b/drivers/base/core.c
new file mode 100644
index 0000000..04bbcd7
--- /dev/null
+++ b/drivers/base/core.c
@@ -0,0 +1,3137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/core.c - core driver model code (device registration, etc)
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2006 Novell, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fwnode.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/netdevice.h>
+#include <linux/sched/signal.h>
+#include <linux/sysfs.h>
+
+#include "base.h"
+#include "power/power.h"
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+#ifdef CONFIG_SYSFS_DEPRECATED_V2
+long sysfs_deprecated = 1;
+#else
+long sysfs_deprecated = 0;
+#endif
+static int __init sysfs_deprecated_setup(char *arg)
+{
+	return kstrtol(arg, 10, &sysfs_deprecated);
+}
+early_param("sysfs.deprecated", sysfs_deprecated_setup);
+#endif
+
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+	mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+	mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+	return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+	srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+	down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+	up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+	down_read(&device_links_lock);
+	return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+	up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc).  Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+	struct device_link *link;
+	int ret;
+
+	if (dev == target)
+		return 1;
+
+	ret = device_for_each_child(dev, target, device_is_dependent);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->consumer == target)
+			return 1;
+
+		ret = device_is_dependent(link->consumer, target);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+	struct device_link *link;
+
+	/*
+	 * Devices that have not been registered yet will be put to the ends
+	 * of the lists during the registration, so skip them here.
+	 */
+	if (device_is_registered(dev))
+		devices_kset_move_last(dev);
+
+	if (device_pm_initialized(dev))
+		device_pm_move_last(dev);
+
+	device_for_each_child(dev, NULL, device_reorder_to_tail);
+	list_for_each_entry(link, &dev->links.consumers, s_node)
+		device_reorder_to_tail(link->consumer, NULL);
+
+	return 0;
+}
+
+/**
+ * device_pm_move_to_tail - Move set of devices to the end of device lists
+ * @dev: Device to move
+ *
+ * This is a device_reorder_to_tail() wrapper taking the requisite locks.
+ *
+ * It moves the @dev along with all of its children and all of its consumers
+ * to the ends of the device_kset and dpm_list, recursively.
+ */
+void device_pm_move_to_tail(struct device *dev)
+{
+	int idx;
+
+	idx = device_links_read_lock();
+	device_pm_lock();
+	device_reorder_to_tail(dev, NULL);
+	device_pm_unlock();
+	device_links_read_unlock(idx);
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account.  Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
+ * automatically when the consumer device driver unbinds from it.
+ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
+ * set is invalid and will cause NULL to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case.  The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+				    struct device *supplier, u32 flags)
+{
+	struct device_link *link;
+
+	if (!consumer || !supplier ||
+	    ((flags & DL_FLAG_STATELESS) &&
+	     (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
+		return NULL;
+
+	device_links_write_lock();
+	device_pm_lock();
+
+	/*
+	 * If the supplier has not been fully registered yet or there is a
+	 * reverse dependency between the consumer and the supplier already in
+	 * the graph, return NULL.
+	 */
+	if (!device_pm_initialized(supplier)
+	    || device_is_dependent(consumer, supplier)) {
+		link = NULL;
+		goto out;
+	}
+
+	list_for_each_entry(link, &supplier->links.consumers, s_node)
+		if (link->consumer == consumer) {
+			kref_get(&link->kref);
+			goto out;
+		}
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		goto out;
+
+	if (flags & DL_FLAG_PM_RUNTIME) {
+		if (flags & DL_FLAG_RPM_ACTIVE) {
+			if (pm_runtime_get_sync(supplier) < 0) {
+				pm_runtime_put_noidle(supplier);
+				kfree(link);
+				link = NULL;
+				goto out;
+			}
+			link->rpm_active = true;
+		}
+		pm_runtime_new_link(consumer);
+		/*
+		 * If the link is being added by the consumer driver at probe
+		 * time, balance the decrementation of the supplier's runtime PM
+		 * usage counter after consumer probe in driver_probe_device().
+		 */
+		if (consumer->links.status == DL_DEV_PROBING)
+			pm_runtime_get_noresume(supplier);
+	}
+	get_device(supplier);
+	link->supplier = supplier;
+	INIT_LIST_HEAD(&link->s_node);
+	get_device(consumer);
+	link->consumer = consumer;
+	INIT_LIST_HEAD(&link->c_node);
+	link->flags = flags;
+	kref_init(&link->kref);
+
+	/* Determine the initial link state. */
+	if (flags & DL_FLAG_STATELESS) {
+		link->status = DL_STATE_NONE;
+	} else {
+		switch (supplier->links.status) {
+		case DL_DEV_DRIVER_BOUND:
+			switch (consumer->links.status) {
+			case DL_DEV_PROBING:
+				/*
+				 * Some callers expect the link creation during
+				 * consumer driver probe to resume the supplier
+				 * even without DL_FLAG_RPM_ACTIVE.
+				 */
+				if (flags & DL_FLAG_PM_RUNTIME)
+					pm_runtime_resume(supplier);
+
+				link->status = DL_STATE_CONSUMER_PROBE;
+				break;
+			case DL_DEV_DRIVER_BOUND:
+				link->status = DL_STATE_ACTIVE;
+				break;
+			default:
+				link->status = DL_STATE_AVAILABLE;
+				break;
+			}
+			break;
+		case DL_DEV_UNBINDING:
+			link->status = DL_STATE_SUPPLIER_UNBIND;
+			break;
+		default:
+			link->status = DL_STATE_DORMANT;
+			break;
+		}
+	}
+
+	/*
+	 * Move the consumer and all of the devices depending on it to the end
+	 * of dpm_list and the devices_kset list.
+	 *
+	 * It is necessary to hold dpm_list locked throughout all of this,
+	 * or else we may end up suspending the devices in the wrong order.
+	 */
+	device_reorder_to_tail(consumer, NULL);
+
+	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+	device_pm_unlock();
+	device_links_write_unlock();
+	return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
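+
+/*
+ * Illustrative sketch, not part of the original file: a consumer driver
+ * creating a managed link to its supplier while probing.  example_probe()
+ * is hypothetical and assumes the supplier device has already been looked
+ * up by other means.
+ */
+static int example_probe(struct device *consumer, struct device *supplier)
+{
+	struct device_link *link;
+
+	/*
+	 * The link is dropped automatically when this consumer unbinds;
+	 * DL_FLAG_RPM_ACTIVE additionally resumes the supplier and takes
+	 * a runtime PM reference on it at link creation.
+	 */
+	link = device_link_add(consumer, supplier,
+			       DL_FLAG_AUTOREMOVE_CONSUMER |
+			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+	if (!link)
+		return -EINVAL;
+
+	return 0;
+}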
+
+static void device_link_free(struct device_link *link)
+{
+	put_device(link->consumer);
+	put_device(link->supplier);
+	kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+	device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct kref *kref)
+{
+	struct device_link *link = container_of(kref, struct device_link, kref);
+
+	dev_info(link->consumer, "Dropping the link to %s\n",
+		 dev_name(link->supplier));
+
+	if (link->flags & DL_FLAG_PM_RUNTIME)
+		pm_runtime_drop_link(link->consumer);
+
+	list_del_rcu(&link->s_node);
+	list_del_rcu(&link->c_node);
+	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct kref *kref)
+{
+	struct device_link *link = container_of(kref, struct device_link, kref);
+
+	dev_info(link->consumer, "Dropping the link to %s\n",
+		 dev_name(link->supplier));
+
+	if (link->flags & DL_FLAG_PM_RUNTIME)
+		pm_runtime_drop_link(link->consumer);
+
+	list_del(&link->s_node);
+	list_del(&link->c_node);
+	device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.  If the link was added multiple times, it needs to be deleted as often.
+ * Care is required for hotplugged devices:  Their links are purged on removal
+ * and calling device_link_del() is then no longer allowed.
+ */
+void device_link_del(struct device_link *link)
+{
+	device_links_write_lock();
+	device_pm_lock();
+	kref_put(&link->kref, __device_link_del);
+	device_pm_unlock();
+	device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
+
+/**
+ * device_link_remove - remove a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_remove(void *consumer, struct device *supplier)
+{
+	struct device_link *link;
+
+	if (WARN_ON(consumer == supplier))
+		return;
+
+	device_links_write_lock();
+	device_pm_lock();
+
+	list_for_each_entry(link, &supplier->links.consumers, s_node) {
+		if (link->consumer == consumer) {
+			kref_put(&link->kref, __device_link_del);
+			break;
+		}
+	}
+
+	device_pm_unlock();
+	device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_remove);
+
+static void device_links_missing_supplier(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node)
+		if (link->status == DL_STATE_CONSUMER_PROBE)
+			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers.  Walk the list of the device's
+ * links to suppliers and see if all of them are available.  If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here.  It can only go away in __device_release_driver() and
+ * that function checks the device's links to consumers.  This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int ret = 0;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->status != DL_STATE_AVAILABLE) {
+			device_links_missing_supplier(dev);
+			ret = -EPROBE_DEFER;
+			break;
+		}
+		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+	}
+	dev->links.status = DL_DEV_PROBING;
+
+	device_links_write_unlock();
+	return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+	struct device_link *link;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->status != DL_STATE_DORMANT);
+		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+	}
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+	}
+
+	dev->links.status = DL_DEV_DRIVER_BOUND;
+
+	device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+	struct device_link *link, *ln;
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+			kref_put(&link->kref, __device_link_del);
+		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+	}
+
+	dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+	device_links_write_lock();
+	__device_links_no_driver(dev);
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke __device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+	struct device_link *link;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+
+		/*
+		 * autoremove the links between this @dev and its consumer
+		 * devices that are not active, i.e. where the link state
+		 * has moved to DL_STATE_SUPPLIER_UNBIND.
+		 */
+		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+			kref_put(&link->kref, __device_link_del);
+
+		WRITE_ONCE(link->status, DL_STATE_DORMANT);
+	}
+
+	__device_links_no_driver(dev);
+
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present).  Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+	struct device_link *link;
+	bool ret = false;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->status == DL_STATE_CONSUMER_PROBE
+		    || link->status == DL_STATE_ACTIVE) {
+			ret = true;
+			break;
+		}
+		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+	}
+
+	dev->links.status = DL_DEV_UNBINDING;
+
+	device_links_write_unlock();
+	return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state.  If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+	struct device_link *link;
+
+ start:
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		enum device_link_state status;
+
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		status = link->status;
+		if (status == DL_STATE_CONSUMER_PROBE) {
+			device_links_write_unlock();
+
+			wait_for_device_probe();
+			goto start;
+		}
+		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+		if (status == DL_STATE_ACTIVE) {
+			struct device *consumer = link->consumer;
+
+			get_device(consumer);
+
+			device_links_write_unlock();
+
+			device_release_driver_internal(consumer, NULL,
+						       consumer->parent);
+			put_device(consumer);
+			goto start;
+		}
+	}
+
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+	struct device_link *link, *ln;
+
+	/*
+	 * Delete all of the remaining links from this device to any other
+	 * devices (either consumers or suppliers).
+	 */
+	device_links_write_lock();
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+		WARN_ON(link->status == DL_STATE_ACTIVE);
+		__device_link_del(&link->kref);
+	}
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+		WARN_ON(link->status != DL_STATE_DORMANT &&
+			link->status != DL_STATE_NONE);
+		__device_link_del(&link->kref);
+	}
+
+	device_links_write_unlock();
+}
+
+/* Device links support end. */
+
+int (*platform_notify)(struct device *dev) = NULL;
+int (*platform_notify_remove)(struct device *dev) = NULL;
+static struct kobject *dev_kobj;
+struct kobject *sysfs_dev_char_kobj;
+struct kobject *sysfs_dev_block_kobj;
+
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+	mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+	mutex_unlock(&device_hotplug_lock);
+}
+
+int lock_device_hotplug_sysfs(void)
+{
+	if (mutex_trylock(&device_hotplug_lock))
+		return 0;
+
+	/* Avoid busy looping (5 ms of sleep should do). */
+	msleep(5);
+	return restart_syscall();
+}
+
+#ifdef CONFIG_BLOCK
+static inline int device_is_not_partition(struct device *dev)
+{
+	return !(dev->type == &part_type);
+}
+#else
+static inline int device_is_not_partition(struct device *dev)
+{
+	return 1;
+}
+#endif
+
+/**
+ * dev_driver_string - Return a device's driver name, if at all possible
+ * @dev: struct device to get the name of
+ *
+ * Will return the device's driver's name if it is bound to a device.  If
+ * the device is not bound to a driver, it will return the name of the bus
+ * it is attached to.  If it is not attached to a bus either, an empty
+ * string will be returned.
+ */
+const char *dev_driver_string(const struct device *dev)
+{
+	struct device_driver *drv;
+
+	/* dev->driver can change to NULL underneath us because of unbinding,
+	 * so be careful about accessing it.  dev->bus and dev->class should
+	 * never change once they are set, so they don't need special care.
+	 */
+	drv = READ_ONCE(dev->driver);
+	return drv ? drv->name :
+			(dev->bus ? dev->bus->name :
+			(dev->class ? dev->class->name : ""));
+}
+EXPORT_SYMBOL(dev_driver_string);
+
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
+static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct device_attribute *dev_attr = to_dev_attr(attr);
+	struct device *dev = kobj_to_dev(kobj);
+	ssize_t ret = -EIO;
+
+	if (dev_attr->show)
+		ret = dev_attr->show(dev, dev_attr, buf);
+	if (ret >= (ssize_t)PAGE_SIZE) {
+		printk("dev_attr_show: %pS returned bad count\n",
+				dev_attr->show);
+	}
+	return ret;
+}
+
+static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct device_attribute *dev_attr = to_dev_attr(attr);
+	struct device *dev = kobj_to_dev(kobj);
+	ssize_t ret = -EIO;
+
+	if (dev_attr->store)
+		ret = dev_attr->store(dev, dev_attr, buf, count);
+	return ret;
+}
+
+static const struct sysfs_ops dev_sysfs_ops = {
+	.show	= dev_attr_show,
+	.store	= dev_attr_store,
+};
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+ssize_t device_store_ulong(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t size)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	char *end;
+	unsigned long new = simple_strtoul(buf, &end, 0);
+	if (end == buf)
+		return -EINVAL;
+	*(unsigned long *)(ea->var) = new;
+	/* Always return full write size even if we didn't consume all */
+	return size;
+}
+EXPORT_SYMBOL_GPL(device_store_ulong);
+
+ssize_t device_show_ulong(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_ulong);
+
+ssize_t device_store_int(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t size)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+	char *end;
+	long new = simple_strtol(buf, &end, 0);
+	if (end == buf || new > INT_MAX || new < INT_MIN)
+		return -EINVAL;
+	*(int *)(ea->var) = new;
+	/* Always return full write size even if we didn't consume all */
+	return size;
+}
+EXPORT_SYMBOL_GPL(device_store_int);
+
+ssize_t device_show_int(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_int);
+
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+	if (strtobool(buf, ea->var) < 0)
+		return -EINVAL;
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
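+
+/*
+ * Illustrative sketch, not part of the original file: the show/store
+ * helpers above are typically wired up through the DEVICE_ULONG_ATTR(),
+ * DEVICE_INT_ATTR() and DEVICE_BOOL_ATTR() wrappers from <linux/device.h>,
+ * which embed a device_attribute next to a variable pointer.  The
+ * example_threshold variable and example_attr_probe() are hypothetical.
+ */
+static unsigned long example_threshold;
+static DEVICE_ULONG_ATTR(threshold, 0644, example_threshold);
+
+static int example_attr_probe(struct device *dev)
+{
+	/* dev_attr_threshold.attr is the embedded struct device_attribute. */
+	return device_create_file(dev, &dev_attr_threshold.attr);
+}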
+
+/**
+ * device_release - free device structure.
+ * @kobj: device's kobject.
+ *
+ * This is called once the reference count for the object
+ * reaches 0. We forward the call to the device's release
+ * method, which should handle actually freeing the structure.
+ */
+static void device_release(struct kobject *kobj)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct device_private *p = dev->p;
+
+	/*
+	 * Some platform devices are driven without driver attached
+	 * and managed resources may have been acquired.  Make sure
+	 * all resources are released.
+	 *
+	 * Drivers still can add resources into device after device
+	 * is deleted but alive, so release devres here to avoid
+	 * possible memory leak.
+	 */
+	devres_release_all(dev);
+
+	if (dev->release)
+		dev->release(dev);
+	else if (dev->type && dev->type->release)
+		dev->type->release(dev);
+	else if (dev->class && dev->class->dev_release)
+		dev->class->dev_release(dev);
+	else
+		WARN(1, KERN_ERR "Device '%s' does not have a release() "
+			"function, it is broken and must be fixed.\n",
+			dev_name(dev));
+	kfree(p);
+}
+
+static const void *device_namespace(struct kobject *kobj)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	const void *ns = NULL;
+
+	if (dev->class && dev->class->ns_type)
+		ns = dev->class->namespace(dev);
+
+	return ns;
+}
+
+static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	if (dev->class && dev->class->get_ownership)
+		dev->class->get_ownership(dev, uid, gid);
+}
+
+static struct kobj_type device_ktype = {
+	.release	= device_release,
+	.sysfs_ops	= &dev_sysfs_ops,
+	.namespace	= device_namespace,
+	.get_ownership	= device_get_ownership,
+};
+
+
+static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+	struct kobj_type *ktype = get_ktype(kobj);
+
+	if (ktype == &device_ktype) {
+		struct device *dev = kobj_to_dev(kobj);
+		if (dev->bus)
+			return 1;
+		if (dev->class)
+			return 1;
+	}
+	return 0;
+}
+
+static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+{
+	struct device *dev = kobj_to_dev(kobj);
+
+	if (dev->bus)
+		return dev->bus->name;
+	if (dev->class)
+		return dev->class->name;
+	return NULL;
+}
+
+static int dev_uevent(struct kset *kset, struct kobject *kobj,
+		      struct kobj_uevent_env *env)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	int retval = 0;
+
+	/* add device node properties if present */
+	if (MAJOR(dev->devt)) {
+		const char *tmp;
+		const char *name;
+		umode_t mode = 0;
+		kuid_t uid = GLOBAL_ROOT_UID;
+		kgid_t gid = GLOBAL_ROOT_GID;
+
+		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
+		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
+		if (name) {
+			add_uevent_var(env, "DEVNAME=%s", name);
+			if (mode)
+				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
+			if (!uid_eq(uid, GLOBAL_ROOT_UID))
+				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
+			if (!gid_eq(gid, GLOBAL_ROOT_GID))
+				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
+			kfree(tmp);
+		}
+	}
+
+	if (dev->type && dev->type->name)
+		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+
+	if (dev->driver)
+		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+
+	/* Add common DT information about the device */
+	of_device_uevent(dev, env);
+
+	/* have the bus specific function add its stuff */
+	if (dev->bus && dev->bus->uevent) {
+		retval = dev->bus->uevent(dev, env);
+		if (retval)
+			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
+				 dev_name(dev), __func__, retval);
+	}
+
+	/* have the class specific function add its stuff */
+	if (dev->class && dev->class->dev_uevent) {
+		retval = dev->class->dev_uevent(dev, env);
+		if (retval)
+			pr_debug("device: '%s': %s: class uevent() "
+				 "returned %d\n", dev_name(dev),
+				 __func__, retval);
+	}
+
+	/* have the device type specific function add its stuff */
+	if (dev->type && dev->type->uevent) {
+		retval = dev->type->uevent(dev, env);
+		if (retval)
+			pr_debug("device: '%s': %s: dev_type uevent() "
+				 "returned %d\n", dev_name(dev),
+				 __func__, retval);
+	}
+
+	return retval;
+}
+
+static const struct kset_uevent_ops device_uevent_ops = {
+	.filter =	dev_uevent_filter,
+	.name =		dev_uevent_name,
+	.uevent =	dev_uevent,
+};
+
+static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct kobject *top_kobj;
+	struct kset *kset;
+	struct kobj_uevent_env *env = NULL;
+	int i;
+	size_t count = 0;
+	int retval;
+
+	/* search the kset the device belongs to */
+	top_kobj = &dev->kobj;
+	while (!top_kobj->kset && top_kobj->parent)
+		top_kobj = top_kobj->parent;
+	if (!top_kobj->kset)
+		goto out;
+
+	kset = top_kobj->kset;
+	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
+		goto out;
+
+	/* respect filter */
+	if (kset->uevent_ops && kset->uevent_ops->filter)
+		if (!kset->uevent_ops->filter(kset, &dev->kobj))
+			goto out;
+
+	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+	if (!env)
+		return -ENOMEM;
+
+	/* let the kset specific function add its keys */
+	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+	if (retval)
+		goto out;
+
+	/* copy keys to file */
+	for (i = 0; i < env->envp_idx; i++)
+		count += sprintf(&buf[count], "%s\n", env->envp[i]);
+out:
+	kfree(env);
+	return count;
+}
+
+static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	if (kobject_synth_uevent(&dev->kobj, buf, count))
+		dev_err(dev, "uevent: failed to send synthetic uevent\n");
+
+	return count;
+}
+static DEVICE_ATTR_RW(uevent);
+
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	bool val;
+
+	device_lock(dev);
+	val = !dev->offline;
+	device_unlock(dev);
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t online_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	bool val;
+	int ret;
+
+	ret = strtobool(buf, &val);
+	if (ret < 0)
+		return ret;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	ret = val ? device_online(dev) : device_offline(dev);
+	unlock_device_hotplug();
+	return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(online);
+
+int device_add_groups(struct device *dev, const struct attribute_group **groups)
+{
+	return sysfs_create_groups(&dev->kobj, groups);
+}
+EXPORT_SYMBOL_GPL(device_add_groups);
+
+void device_remove_groups(struct device *dev,
+			  const struct attribute_group **groups)
+{
+	sysfs_remove_groups(&dev->kobj, groups);
+}
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+	const struct attribute_group *group;
+	const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+	return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+	union device_attr_group_devres *devres = res;
+	const struct attribute_group *group = devres->group;
+
+	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+	sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+	union device_attr_group_devres *devres = res;
+	const struct attribute_group **groups = devres->groups;
+
+	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+	sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev:	The device to create the group for
+ * @grp:	The attribute group to create
+ *
+ * This function creates a group for the first time.  It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+	union device_attr_group_devres *devres;
+	int error;
+
+	devres = devres_alloc(devm_attr_group_remove,
+			      sizeof(*devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	error = sysfs_create_group(&dev->kobj, grp);
+	if (error) {
+		devres_free(devres);
+		return error;
+	}
+
+	devres->group = grp;
+	devres_add(dev, devres);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
+
+/**
+ * devm_device_remove_group - remove a managed group from a device
+ * @dev:	device to remove the group from
+ * @grp:	group to remove
+ *
+ * This function removes a group of attributes from a device.  The group
+ * must previously have been created for the device, otherwise it will fail.
+ */
+void devm_device_remove_group(struct device *dev,
+			      const struct attribute_group *grp)
+{
+	WARN_ON(devres_release(dev, devm_attr_group_remove,
+			       devm_attr_group_match,
+			       /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev:	The device to create the group for
+ * @groups:	The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups.  If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called.  It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+			   const struct attribute_group **groups)
+{
+	union device_attr_group_devres *devres;
+	int error;
+
+	devres = devres_alloc(devm_attr_groups_remove,
+			      sizeof(*devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	error = sysfs_create_groups(&dev->kobj, groups);
+	if (error) {
+		devres_free(devres);
+		return error;
+	}
+
+	devres->groups = groups;
+	devres_add(dev, devres);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
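+
+/*
+ * Illustrative sketch, not part of the original file: with the devm_
+ * variants the groups are removed automatically when the driver detaches,
+ * so neither the error path nor the remove routine needs explicit sysfs
+ * cleanup.  All example_* names are hypothetical.
+ */
+static ssize_t example_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "example\n");
+}
+static DEVICE_ATTR_RO(example);
+
+static struct attribute *example_attrs[] = {
+	&dev_attr_example.attr,
+	NULL
+};
+
+static const struct attribute_group example_group = {
+	.attrs = example_attrs,
+};
+
+static const struct attribute_group *example_groups[] = {
+	&example_group,
+	NULL
+};
+
+static int example_groups_probe(struct device *dev)
+{
+	return devm_device_add_groups(dev, example_groups);
+}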
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev:	The device for the groups to be removed from
+ * @groups:	NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+			       const struct attribute_group **groups)
+{
+	WARN_ON(devres_release(dev, devm_attr_groups_remove,
+			       devm_attr_group_match,
+			       /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
+
+static int device_add_attrs(struct device *dev)
+{
+	struct class *class = dev->class;
+	const struct device_type *type = dev->type;
+	int error;
+
+	if (class) {
+		error = device_add_groups(dev, class->dev_groups);
+		if (error)
+			return error;
+	}
+
+	if (type) {
+		error = device_add_groups(dev, type->groups);
+		if (error)
+			goto err_remove_class_groups;
+	}
+
+	error = device_add_groups(dev, dev->groups);
+	if (error)
+		goto err_remove_type_groups;
+
+	if (device_supports_offline(dev) && !dev->offline_disabled) {
+		error = device_create_file(dev, &dev_attr_online);
+		if (error)
+			goto err_remove_dev_groups;
+	}
+
+	return 0;
+
+ err_remove_dev_groups:
+	device_remove_groups(dev, dev->groups);
+ err_remove_type_groups:
+	if (type)
+		device_remove_groups(dev, type->groups);
+ err_remove_class_groups:
+	if (class)
+		device_remove_groups(dev, class->dev_groups);
+
+	return error;
+}
+
+static void device_remove_attrs(struct device *dev)
+{
+	struct class *class = dev->class;
+	const struct device_type *type = dev->type;
+
+	device_remove_file(dev, &dev_attr_online);
+	device_remove_groups(dev, dev->groups);
+
+	if (type)
+		device_remove_groups(dev, type->groups);
+
+	if (class)
+		device_remove_groups(dev, class->dev_groups);
+}
+
+static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	return print_dev_t(buf, dev->devt);
+}
+static DEVICE_ATTR_RO(dev);
+
+/* /sys/devices/ */
+struct kset *devices_kset;
+
+/**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s before %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s after %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&dev->kobj.entry, &devices_kset->list);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * device_create_file - create sysfs attribute file for device.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+int device_create_file(struct device *dev,
+		       const struct device_attribute *attr)
+{
+	int error = 0;
+
+	if (dev) {
+		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
+			"Attribute %s: write permission without 'store'\n",
+			attr->attr.name);
+		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
+			"Attribute %s: read permission without 'show'\n",
+			attr->attr.name);
+		error = sysfs_create_file(&dev->kobj, &attr->attr);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_create_file);
+
+/**
+ * device_remove_file - remove sysfs attribute file.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+void device_remove_file(struct device *dev,
+			const struct device_attribute *attr)
+{
+	if (dev)
+		sysfs_remove_file(&dev->kobj, &attr->attr);
+}
+EXPORT_SYMBOL_GPL(device_remove_file);
+
+/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+			     const struct device_attribute *attr)
+{
+	if (dev)
+		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+	else
+		return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
+
+/**
+ * device_create_bin_file - create sysfs binary attribute file for device.
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+int device_create_bin_file(struct device *dev,
+			   const struct bin_attribute *attr)
+{
+	int error = -EINVAL;
+	if (dev)
+		error = sysfs_create_bin_file(&dev->kobj, attr);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_create_bin_file);
+
+/**
+ * device_remove_bin_file - remove sysfs binary attribute file
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+void device_remove_bin_file(struct device *dev,
+			    const struct bin_attribute *attr)
+{
+	if (dev)
+		sysfs_remove_bin_file(&dev->kobj, attr);
+}
+EXPORT_SYMBOL_GPL(device_remove_bin_file);
+
+static void klist_children_get(struct klist_node *n)
+{
+	struct device_private *p = to_device_private_parent(n);
+	struct device *dev = p->device;
+
+	get_device(dev);
+}
+
+static void klist_children_put(struct klist_node *n)
+{
+	struct device_private *p = to_device_private_parent(n);
+	struct device *dev = p->device;
+
+	put_device(dev);
+}
+
+/**
+ * device_initialize - init device structure.
+ * @dev: device.
+ *
+ * This prepares the device for use by other layers by initializing
+ * its fields.
+ * It is the first half of device_register(), if called by
+ * that function, though it can also be called separately, so one
+ * may use @dev's fields. In particular, get_device()/put_device()
+ * may be used for reference counting of @dev after calling this
+ * function.
+ *
+ * All fields in @dev must be initialized by the caller to 0, except
+ * for those explicitly set to some other value.  The simplest
+ * approach is to use kzalloc() to allocate the structure containing
+ * @dev.
+ *
+ * NOTE: Use put_device() to give up your reference instead of freeing
+ * @dev directly once you have called this function.
+ */
+void device_initialize(struct device *dev)
+{
+	dev->kobj.kset = devices_kset;
+	kobject_init(&dev->kobj, &device_ktype);
+	INIT_LIST_HEAD(&dev->dma_pools);
+	mutex_init(&dev->mutex);
+	lockdep_set_novalidate_class(&dev->mutex);
+	spin_lock_init(&dev->devres_lock);
+	INIT_LIST_HEAD(&dev->devres_head);
+	device_pm_init(dev);
+	set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+	INIT_LIST_HEAD(&dev->msi_list);
+#endif
+	INIT_LIST_HEAD(&dev->links.consumers);
+	INIT_LIST_HEAD(&dev->links.suppliers);
+	dev->links.status = DL_DEV_NO_DRIVER;
+}
+EXPORT_SYMBOL_GPL(device_initialize);
+
+struct kobject *virtual_device_parent(struct device *dev)
+{
+	static struct kobject *virtual_dir = NULL;
+
+	if (!virtual_dir)
+		virtual_dir = kobject_create_and_add("virtual",
+						     &devices_kset->kobj);
+
+	return virtual_dir;
+}
+
+struct class_dir {
+	struct kobject kobj;
+	struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+	struct class_dir *dir = to_class_dir(kobj);
+	kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
+{
+	struct class_dir *dir = to_class_dir(kobj);
+	return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+	.release	= class_dir_release,
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.child_ns_type	= class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+	struct class_dir *dir;
+	int retval;
+
+	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+	if (!dir)
+		return ERR_PTR(-ENOMEM);
+
+	dir->class = class;
+	kobject_init(&dir->kobj, &class_dir_ktype);
+
+	dir->kobj.kset = &class->p->glue_dirs;
+
+	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+	if (retval < 0) {
+		kobject_put(&dir->kobj);
+		return ERR_PTR(retval);
+	}
+	return &dir->kobj;
+}
+
+static DEFINE_MUTEX(gdp_mutex);
+
+static struct kobject *get_device_parent(struct device *dev,
+					 struct device *parent)
+{
+	if (dev->class) {
+		struct kobject *kobj = NULL;
+		struct kobject *parent_kobj;
+		struct kobject *k;
+
+#ifdef CONFIG_BLOCK
+		/* block disks show up in /sys/block */
+		if (sysfs_deprecated && dev->class == &block_class) {
+			if (parent && parent->class == &block_class)
+				return &parent->kobj;
+			return &block_class.p->subsys.kobj;
+		}
+#endif
+
+		/*
+		 * If we have no parent, we live in "virtual".
+		 * Class-devices with a non-class-device parent live in a
+		 * "glue" directory to prevent namespace collisions.
+		 */
+		if (parent == NULL)
+			parent_kobj = virtual_device_parent(dev);
+		else if (parent->class && !dev->class->ns_type)
+			return &parent->kobj;
+		else
+			parent_kobj = &parent->kobj;
+
+		mutex_lock(&gdp_mutex);
+
+		/* find our class-directory at the parent and reference it */
+		spin_lock(&dev->class->p->glue_dirs.list_lock);
+		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
+			if (k->parent == parent_kobj) {
+				kobj = kobject_get(k);
+				break;
+			}
+		spin_unlock(&dev->class->p->glue_dirs.list_lock);
+		if (kobj) {
+			mutex_unlock(&gdp_mutex);
+			return kobj;
+		}
+
+		/* or create a new class-directory at the parent device */
+		k = class_dir_create_and_add(dev->class, parent_kobj);
+		/* do not emit an uevent for this simple "glue" directory */
+		mutex_unlock(&gdp_mutex);
+		return k;
+	}
+
+	/* subsystems can specify a default root directory for their devices */
+	if (!parent && dev->bus && dev->bus->dev_root)
+		return &dev->bus->dev_root->kobj;
+
+	if (parent)
+		return &parent->kobj;
+	return NULL;
+}
+
+static inline bool live_in_glue_dir(struct kobject *kobj,
+				    struct device *dev)
+{
+	if (!kobj || !dev->class ||
+	    kobj->kset != &dev->class->p->glue_dirs)
+		return false;
+	return true;
+}
+
+static inline struct kobject *get_glue_dir(struct device *dev)
+{
+	return dev->kobj.parent;
+}
+
+/*
+ * Make sure cleaning up the glue dir is done as the last step; the
+ * kobject's .release handler needs to run while the global gdp_mutex
+ * is held.
+ */
+static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+{
+	/* see if we live in a "glue" directory */
+	if (!live_in_glue_dir(glue_dir, dev))
+		return;
+
+	mutex_lock(&gdp_mutex);
+	if (!kobject_has_children(glue_dir))
+		kobject_del(glue_dir);
+	kobject_put(glue_dir);
+	mutex_unlock(&gdp_mutex);
+}
+
+static int device_add_class_symlinks(struct device *dev)
+{
+	struct device_node *of_node = dev_of_node(dev);
+	int error;
+
+	if (of_node) {
+		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
+		if (error)
+			dev_warn(dev, "Error %d creating of_node link\n", error);
+		/* An error here doesn't warrant bringing down the device */
+	}
+
+	if (!dev->class)
+		return 0;
+
+	error = sysfs_create_link(&dev->kobj,
+				  &dev->class->p->subsys.kobj,
+				  "subsystem");
+	if (error)
+		goto out_devnode;
+
+	if (dev->parent && device_is_not_partition(dev)) {
+		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+					  "device");
+		if (error)
+			goto out_subsys;
+	}
+
+#ifdef CONFIG_BLOCK
+	/* /sys/block has directories and does not need symlinks */
+	if (sysfs_deprecated && dev->class == &block_class)
+		return 0;
+#endif
+
+	/* link in the class directory pointing to the device */
+	error = sysfs_create_link(&dev->class->p->subsys.kobj,
+				  &dev->kobj, dev_name(dev));
+	if (error)
+		goto out_device;
+
+	return 0;
+
+out_device:
+	sysfs_remove_link(&dev->kobj, "device");
+
+out_subsys:
+	sysfs_remove_link(&dev->kobj, "subsystem");
+out_devnode:
+	sysfs_remove_link(&dev->kobj, "of_node");
+	return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+	if (dev_of_node(dev))
+		sysfs_remove_link(&dev->kobj, "of_node");
+
+	if (!dev->class)
+		return;
+
+	if (dev->parent && device_is_not_partition(dev))
+		sysfs_remove_link(&dev->kobj, "device");
+	sysfs_remove_link(&dev->kobj, "subsystem");
+#ifdef CONFIG_BLOCK
+	if (sysfs_deprecated && dev->class == &block_class)
+		return;
+#endif
+	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
+}
+
+/**
+ * dev_set_name - set a device name
+ * @dev: device
+ * @fmt: format string for the device's name
+ */
+int dev_set_name(struct device *dev, const char *fmt, ...)
+{
+	va_list vargs;
+	int err;
+
+	va_start(vargs, fmt);
+	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+	va_end(vargs);
+	return err;
+}
+EXPORT_SYMBOL_GPL(dev_set_name);
+
+/**
+ * device_to_dev_kobj - select a /sys/dev/ directory for the device
+ * @dev: device
+ *
+ * By default we select char/ for new entries.  Setting class->dev_kobj
+ * to NULL prevents an entry from being created.  class->dev_kobj must
+ * be set (or cleared) before any devices are registered to the class,
+ * otherwise device_create_sys_dev_entry() and
+ * device_remove_sys_dev_entry() will disagree about the presence of
+ * the link.
+ */
+static struct kobject *device_to_dev_kobj(struct device *dev)
+{
+	struct kobject *kobj;
+
+	if (dev->class)
+		kobj = dev->class->dev_kobj;
+	else
+		kobj = sysfs_dev_char_kobj;
+
+	return kobj;
+}
+
+static int device_create_sys_dev_entry(struct device *dev)
+{
+	struct kobject *kobj = device_to_dev_kobj(dev);
+	int error = 0;
+	char devt_str[15];
+
+	if (kobj) {
+		format_dev_t(devt_str, dev->devt);
+		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
+	}
+
+	return error;
+}
+
+static void device_remove_sys_dev_entry(struct device *dev)
+{
+	struct kobject *kobj = device_to_dev_kobj(dev);
+	char devt_str[15];
+
+	if (kobj) {
+		format_dev_t(devt_str, dev->devt);
+		sysfs_remove_link(kobj, devt_str);
+	}
+}
+
+static int device_private_init(struct device *dev)
+{
+	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
+	if (!dev->p)
+		return -ENOMEM;
+	dev->p->device = dev;
+	klist_init(&dev->p->klist_children, klist_children_get,
+		   klist_children_put);
+	INIT_LIST_HEAD(&dev->p->deferred_probe);
+	return 0;
+}
+
+/**
+ * device_add - add device to device hierarchy.
+ * @dev: device.
+ *
+ * This is part 2 of device_register(), though it may be called
+ * separately _iff_ device_initialize() has been called separately.
+ *
+ * This adds @dev to the kobject hierarchy via kobject_add(), adds it
+ * to the global and sibling lists for the device, then
+ * adds it to the other relevant subsystems of the driver model.
+ *
+ * Do not call this routine or device_register() more than once for
+ * any device structure.  The driver model core is not designed to work
+ * with devices that get unregistered and then spring back to life.
+ * (Among other things, it's very hard to guarantee that all references
+ * to the previous incarnation of @dev have been dropped.)  Allocate
+ * and register a fresh new struct device instead.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up your
+ * reference instead.
+ */
+int device_add(struct device *dev)
+{
+	struct device *parent;
+	struct kobject *kobj;
+	struct class_interface *class_intf;
+	int error = -EINVAL;
+	struct kobject *glue_dir = NULL;
+
+	dev = get_device(dev);
+	if (!dev)
+		goto done;
+
+	if (!dev->p) {
+		error = device_private_init(dev);
+		if (error)
+			goto done;
+	}
+
+	/*
+	 * for statically allocated devices, which should all be converted
+	 * some day, we need to initialize the name. We prevent reading back
+	 * the name, and force the use of dev_name()
+	 */
+	if (dev->init_name) {
+		dev_set_name(dev, "%s", dev->init_name);
+		dev->init_name = NULL;
+	}
+
+	/* subsystems can specify simple device enumeration */
+	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
+		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+
+	if (!dev_name(dev)) {
+		error = -EINVAL;
+		goto name_error;
+	}
+
+	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+
+	parent = get_device(dev->parent);
+	kobj = get_device_parent(dev, parent);
+	if (IS_ERR(kobj)) {
+		error = PTR_ERR(kobj);
+		goto parent_error;
+	}
+	if (kobj)
+		dev->kobj.parent = kobj;
+
+	/* use parent numa_node */
+	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
+		set_dev_node(dev, dev_to_node(parent));
+
+	/* first, register with generic layer. */
+	/* we require the name to be set before, and pass NULL */
+	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
+	if (error) {
+		glue_dir = get_glue_dir(dev);
+		goto Error;
+	}
+
+	/* notify platform of device entry */
+	if (platform_notify)
+		platform_notify(dev);
+
+	error = device_create_file(dev, &dev_attr_uevent);
+	if (error)
+		goto attrError;
+
+	error = device_add_class_symlinks(dev);
+	if (error)
+		goto SymlinkError;
+	error = device_add_attrs(dev);
+	if (error)
+		goto AttrsError;
+	error = bus_add_device(dev);
+	if (error)
+		goto BusError;
+	error = dpm_sysfs_add(dev);
+	if (error)
+		goto DPMError;
+	device_pm_add(dev);
+
+	if (MAJOR(dev->devt)) {
+		error = device_create_file(dev, &dev_attr_dev);
+		if (error)
+			goto DevAttrError;
+
+		error = device_create_sys_dev_entry(dev);
+		if (error)
+			goto SysEntryError;
+
+		devtmpfs_create_node(dev);
+	}
+
+	/* Notify clients of device addition.  This call must come
+	 * after dpm_sysfs_add() and before kobject_uevent().
+	 */
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_ADD_DEVICE, dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	bus_probe_device(dev);
+	if (parent)
+		klist_add_tail(&dev->p->knode_parent,
+			       &parent->p->klist_children);
+
+	if (dev->class) {
+		mutex_lock(&dev->class->p->mutex);
+		/* tie the class to the device */
+		klist_add_tail(&dev->knode_class,
+			       &dev->class->p->klist_devices);
+
+		/* notify any interfaces that the device is here */
+		list_for_each_entry(class_intf,
+				    &dev->class->p->interfaces, node)
+			if (class_intf->add_dev)
+				class_intf->add_dev(dev, class_intf);
+		mutex_unlock(&dev->class->p->mutex);
+	}
+done:
+	put_device(dev);
+	return error;
+ SysEntryError:
+	if (MAJOR(dev->devt))
+		device_remove_file(dev, &dev_attr_dev);
+ DevAttrError:
+	device_pm_remove(dev);
+	dpm_sysfs_remove(dev);
+ DPMError:
+	bus_remove_device(dev);
+ BusError:
+	device_remove_attrs(dev);
+ AttrsError:
+	device_remove_class_symlinks(dev);
+ SymlinkError:
+	device_remove_file(dev, &dev_attr_uevent);
+ attrError:
+	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+	glue_dir = get_glue_dir(dev);
+	kobject_del(&dev->kobj);
+ Error:
+	cleanup_glue_dir(dev, glue_dir);
+parent_error:
+	put_device(parent);
+name_error:
+	kfree(dev->p);
+	dev->p = NULL;
+	goto done;
+}
+EXPORT_SYMBOL_GPL(device_add);
+
+/**
+ * device_register - register a device with the system.
+ * @dev: pointer to the device structure
+ *
+ * This happens in two clean steps - initialize the device
+ * and add it to the system. The two steps can be called
+ * separately, but this is the easiest and most common.
+ * I.e. you should only call the two helpers separately if you
+ * have a clearly defined need to use and refcount the device
+ * before it is added to the hierarchy.
+ *
+ * For more information, see the kerneldoc for device_initialize()
+ * and device_add().
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up the
+ * reference initialized in this function instead.
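+ *
+ * A minimal registration sketch, assuming an illustrative driver-private
+ * "struct foo" with an embedded struct device and a foo_release() handler:
+ *
+ *	foo->dev.parent = parent;
+ *	foo->dev.release = foo_release;
+ *	dev_set_name(&foo->dev, "foo%d", id);
+ *	err = device_register(&foo->dev);
+ *	if (err) {
+ *		put_device(&foo->dev);
+ *		return err;
+ *	}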
+ */
+int device_register(struct device *dev)
+{
+	device_initialize(dev);
+	return device_add(dev);
+}
+EXPORT_SYMBOL_GPL(device_register);
+
+/**
+ * get_device - increment reference count for device.
+ * @dev: device.
+ *
+ * This simply forwards the call to kobject_get(), though
+ * we do take care to provide for the case that we get a NULL
+ * pointer passed in.
+ */
+struct device *get_device(struct device *dev)
+{
+	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_device);
+
+/**
+ * put_device - decrement reference count.
+ * @dev: device in question.
+ */
+void put_device(struct device *dev)
+{
+	/* might_sleep(); */
+	if (dev)
+		kobject_put(&dev->kobj);
+}
+EXPORT_SYMBOL_GPL(put_device);
+
+/**
+ * device_del - delete device from system.
+ * @dev: device.
+ *
+ * This is the first part of the device unregistration
+ * sequence. This removes the device from the lists we control
+ * from here, removes it from the other driver model
+ * subsystems it was added to in device_add(), and removes it
+ * from the kobject hierarchy.
+ *
+ * NOTE: this should be called manually _iff_ device_add() was
+ * also called manually.
+ */
+void device_del(struct device *dev)
+{
+	struct device *parent = dev->parent;
+	struct kobject *glue_dir = NULL;
+	struct class_interface *class_intf;
+
+	/* Notify clients of device removal.  This call must come
+	 * before dpm_sysfs_remove().
+	 */
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_DEL_DEVICE, dev);
+
+	dpm_sysfs_remove(dev);
+	if (parent)
+		klist_del(&dev->p->knode_parent);
+	if (MAJOR(dev->devt)) {
+		devtmpfs_delete_node(dev);
+		device_remove_sys_dev_entry(dev);
+		device_remove_file(dev, &dev_attr_dev);
+	}
+	if (dev->class) {
+		device_remove_class_symlinks(dev);
+
+		mutex_lock(&dev->class->p->mutex);
+		/* notify any interfaces that the device is now gone */
+		list_for_each_entry(class_intf,
+				    &dev->class->p->interfaces, node)
+			if (class_intf->remove_dev)
+				class_intf->remove_dev(dev, class_intf);
+		/* remove the device from the class list */
+		klist_del(&dev->knode_class);
+		mutex_unlock(&dev->class->p->mutex);
+	}
+	device_remove_file(dev, &dev_attr_uevent);
+	device_remove_attrs(dev);
+	bus_remove_device(dev);
+	device_pm_remove(dev);
+	driver_deferred_probe_del(dev);
+	device_remove_properties(dev);
+	device_links_purge(dev);
+
+	/* Notify the platform of the removal, in case they
+	 * need to do anything...
+	 */
+	if (platform_notify_remove)
+		platform_notify_remove(dev);
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_REMOVED_DEVICE, dev);
+	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+	glue_dir = get_glue_dir(dev);
+	kobject_del(&dev->kobj);
+	cleanup_glue_dir(dev, glue_dir);
+	put_device(parent);
+}
+EXPORT_SYMBOL_GPL(device_del);
+
+/**
+ * device_unregister - unregister device from system.
+ * @dev: device going away.
+ *
+ * We do this in two parts, like we do device_register(). First,
+ * we remove it from all the subsystems with device_del(), then
+ * we decrement the reference count via put_device(). If that
+ * is the final reference count, the device will be cleaned up
+ * via device_release() above. Otherwise, the structure will
+ * stick around until the final reference to the device is dropped.
+ */
+void device_unregister(struct device *dev)
+{
+	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+	device_del(dev);
+	put_device(dev);
+}
+EXPORT_SYMBOL_GPL(device_unregister);
+
+static struct device *prev_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_prev(i);
+	struct device *dev = NULL;
+	struct device_private *p;
+
+	if (n) {
+		p = to_device_private_parent(n);
+		dev = p->device;
+	}
+	return dev;
+}
+
+static struct device *next_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_next(i);
+	struct device *dev = NULL;
+	struct device_private *p;
+
+	if (n) {
+		p = to_device_private_parent(n);
+		dev = p->device;
+	}
+	return dev;
+}
+
+/**
+ * device_get_devnode - path of device node file
+ * @dev: device
+ * @mode: returned file access mode
+ * @uid: returned file owner
+ * @gid: returned file group
+ * @tmp: possibly allocated string
+ *
+ * Return the relative path of a possible device node.
+ * Non-default names may need to allocate memory to compose
+ * a name. This memory is returned in @tmp and needs to be
+ * freed by the caller.
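+ *
+ * A sketch of the expected caller pattern (illustrative only; kfree(NULL)
+ * is a no-op, so unconditionally freeing @tmp is safe):
+ *
+ *	const char *tmp = NULL;
+ *	const char *name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
+ *
+ *	if (name)
+ *		...use the relative path in name...
+ *	kfree(tmp);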
+ */
+const char *device_get_devnode(struct device *dev,
+			       umode_t *mode, kuid_t *uid, kgid_t *gid,
+			       const char **tmp)
+{
+	char *s;
+
+	*tmp = NULL;
+
+	/* the device type may provide a specific name */
+	if (dev->type && dev->type->devnode)
+		*tmp = dev->type->devnode(dev, mode, uid, gid);
+	if (*tmp)
+		return *tmp;
+
+	/* the class may provide a specific name */
+	if (dev->class && dev->class->devnode)
+		*tmp = dev->class->devnode(dev, mode);
+	if (*tmp)
+		return *tmp;
+
+	/* return name without allocation, tmp == NULL */
+	if (strchr(dev_name(dev), '!') == NULL)
+		return dev_name(dev);
+
+	/* replace '!' in the name with '/' */
+	s = kstrdup(dev_name(dev), GFP_KERNEL);
+	if (!s)
+		return NULL;
+	strreplace(s, '!', '/');
+	return *tmp = s;
+}
+
+/**
+ * device_for_each_child - device child iterator.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
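+ *
+ * A minimal callback sketch (unbind_child is an illustrative name):
+ *
+ *	static int unbind_child(struct device *dev, void *data)
+ *	{
+ *		device_release_driver(dev);
+ *		return 0;
+ *	}
+ *
+ *	device_for_each_child(parent, NULL, unbind_child);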
+ */
+int device_for_each_child(struct device *parent, void *data,
+			  int (*fn)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *child;
+	int error = 0;
+
+	if (!parent->p)
+		return 0;
+
+	klist_iter_init(&parent->p->klist_children, &i);
+	while (!error && (child = next_device(&i)))
+		error = fn(child, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child);
+
+/**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+				  int (*fn)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *child;
+	int error = 0;
+
+	if (!parent->p)
+		return 0;
+
+	klist_iter_init(&parent->p->klist_children, &i);
+	while ((child = prev_device(&i)) && !error)
+		error = fn(child, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
+ * device_find_child - device iterator for locating a particular device.
+ * @parent: parent struct device
+ * @match: Callback function to check device
+ * @data: Data to pass to match function
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does.  If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
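+ *
+ * A minimal match callback sketch ("foo0" and match_name are
+ * illustrative):
+ *
+ *	static int match_name(struct device *dev, void *data)
+ *	{
+ *		return sysfs_streq(dev_name(dev), data);
+ *	}
+ *
+ *	child = device_find_child(parent, (void *)"foo0", match_name);
+ *	if (child) {
+ *		...
+ *		put_device(child);
+ *	}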
+ */
+struct device *device_find_child(struct device *parent, void *data,
+				 int (*match)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *child;
+
+	if (!parent)
+		return NULL;
+
+	klist_iter_init(&parent->p->klist_children, &i);
+	while ((child = next_device(&i)))
+		if (match(child, data) && get_device(child))
+			break;
+	klist_iter_exit(&i);
+	return child;
+}
+EXPORT_SYMBOL_GPL(device_find_child);
+
+int __init devices_init(void)
+{
+	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
+	if (!devices_kset)
+		return -ENOMEM;
+	dev_kobj = kobject_create_and_add("dev", NULL);
+	if (!dev_kobj)
+		goto dev_kobj_err;
+	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
+	if (!sysfs_dev_block_kobj)
+		goto block_kobj_err;
+	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
+	if (!sysfs_dev_char_kobj)
+		goto char_kobj_err;
+
+	return 0;
+
+ char_kobj_err:
+	kobject_put(sysfs_dev_block_kobj);
+ block_kobj_err:
+	kobject_put(dev_kobj);
+ dev_kobj_err:
+	kset_unregister(devices_kset);
+	return -ENOMEM;
+}
+
+static int device_check_offline(struct device *dev, void *not_used)
+{
+	int ret;
+
+	ret = device_for_each_child(dev, NULL, device_check_offline);
+	if (ret)
+		return ret;
+
+	return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
+}
+
+/**
+ * device_offline - Prepare the device for hot-removal.
+ * @dev: Device to be put offline.
+ *
+ * Execute the device bus type's .offline() callback, if present, to prepare
+ * the device for a subsequent hot-removal.  If that succeeds, the device must
+ * not be used until either it is removed or its bus type's .online() callback
+ * is executed.
+ *
+ * Call under device_hotplug_lock.
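+ *
+ * A sketch of the expected calling pattern; note that a positive return
+ * value means the device was already offline:
+ *
+ *	lock_device_hotplug();
+ *	ret = device_offline(dev);
+ *	unlock_device_hotplug();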
+ */
+int device_offline(struct device *dev)
+{
+	int ret;
+
+	if (dev->offline_disabled)
+		return -EPERM;
+
+	ret = device_for_each_child(dev, NULL, device_check_offline);
+	if (ret)
+		return ret;
+
+	device_lock(dev);
+	if (device_supports_offline(dev)) {
+		if (dev->offline) {
+			ret = 1;
+		} else {
+			ret = dev->bus->offline(dev);
+			if (!ret) {
+				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+				dev->offline = true;
+			}
+		}
+	}
+	device_unlock(dev);
+
+	return ret;
+}
+
+/**
+ * device_online - Put the device back online after successful device_offline().
+ * @dev: Device to be put back online.
+ *
+ * If device_offline() has been successfully executed for @dev, but the device
+ * has not been removed subsequently, execute its bus type's .online() callback
+ * to indicate that the device can be used again.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_online(struct device *dev)
+{
+	int ret = 0;
+
+	device_lock(dev);
+	if (device_supports_offline(dev)) {
+		if (dev->offline) {
+			ret = dev->bus->online(dev);
+			if (!ret) {
+				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+				dev->offline = false;
+			}
+		} else {
+			ret = 1;
+		}
+	}
+	device_unlock(dev);
+
+	return ret;
+}
+
+struct root_device {
+	struct device dev;
+	struct module *owner;
+};
+
+static inline struct root_device *to_root_device(struct device *d)
+{
+	return container_of(d, struct root_device, dev);
+}
+
+static void root_device_release(struct device *dev)
+{
+	kfree(to_root_device(dev));
+}
+
+/**
+ * __root_device_register - allocate and register a root device
+ * @name: root device name
+ * @owner: owner module of the root device, usually THIS_MODULE
+ *
+ * This function allocates a root device and registers it
+ * using device_register(). In order to free the returned
+ * device, use root_device_unregister().
+ *
+ * Root devices are dummy devices which allow other devices
+ * to be grouped under /sys/devices. Use this function to
+ * allocate a root device and then use it as the parent of
+ * any device which should appear under /sys/devices/{name}
+ *
+ * The /sys/devices/{name} directory will also contain a
+ * 'module' symlink which points to the @owner directory
+ * in sysfs.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: You probably want to use root_device_register().
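+ *
+ * A minimal usage sketch (the "foo" name is illustrative):
+ *
+ *	root = root_device_register("foo");
+ *	if (IS_ERR(root))
+ *		return PTR_ERR(root);
+ *
+ *	...register child devices with dev->parent = root, then later...
+ *	root_device_unregister(root);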
+ */
+struct device *__root_device_register(const char *name, struct module *owner)
+{
+	struct root_device *root;
+	int err = -ENOMEM;
+
+	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(err);
+
+	err = dev_set_name(&root->dev, "%s", name);
+	if (err) {
+		kfree(root);
+		return ERR_PTR(err);
+	}
+
+	root->dev.release = root_device_release;
+
+	err = device_register(&root->dev);
+	if (err) {
+		put_device(&root->dev);
+		return ERR_PTR(err);
+	}
+
+#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
+	if (owner) {
+		struct module_kobject *mk = &owner->mkobj;
+
+		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
+		if (err) {
+			device_unregister(&root->dev);
+			return ERR_PTR(err);
+		}
+		root->owner = owner;
+	}
+#endif
+
+	return &root->dev;
+}
+EXPORT_SYMBOL_GPL(__root_device_register);
+
+/**
+ * root_device_unregister - unregister and free a root device
+ * @dev: device going away
+ *
+ * This function unregisters and cleans up a device that was created by
+ * root_device_register().
+ */
+void root_device_unregister(struct device *dev)
+{
+	struct root_device *root = to_root_device(dev);
+
+	if (root->owner)
+		sysfs_remove_link(&root->dev.kobj, "module");
+
+	device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(root_device_unregister);
+
+
+static void device_create_release(struct device *dev)
+{
+	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+	kfree(dev);
+}
+
+static __printf(6, 0) struct device *
+device_create_groups_vargs(struct class *class, struct device *parent,
+			   dev_t devt, void *drvdata,
+			   const struct attribute_group **groups,
+			   const char *fmt, va_list args)
+{
+	struct device *dev = NULL;
+	int retval = -ENODEV;
+
+	if (class == NULL || IS_ERR(class))
+		goto error;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	device_initialize(dev);
+	dev->devt = devt;
+	dev->class = class;
+	dev->parent = parent;
+	dev->groups = groups;
+	dev->release = device_create_release;
+	dev_set_drvdata(dev, drvdata);
+
+	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+	if (retval)
+		goto error;
+
+	retval = device_add(dev);
+	if (retval)
+		goto error;
+
+	return dev;
+
+error:
+	put_device(dev);
+	return ERR_PTR(retval);
+}
+
+/**
+ * device_create_vargs - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ * @args: va_list for the device's name
+ *
+ * This function can be used by char device classes.  A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_vargs(struct class *class, struct device *parent,
+				   dev_t devt, void *drvdata, const char *fmt,
+				   va_list args)
+{
+	return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
+					  fmt, args);
+}
+EXPORT_SYMBOL_GPL(device_create_vargs);
+
+/**
+ * device_create - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes.  A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
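+ *
+ * A minimal usage sketch (foo_class, foo_major, and the "foo%d" name are
+ * illustrative assumptions):
+ *
+ *	dev = device_create(foo_class, NULL, MKDEV(foo_major, 0), NULL,
+ *			    "foo%d", 0);
+ *	if (IS_ERR(dev))
+ *		return PTR_ERR(dev);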
+ */
+struct device *device_create(struct class *class, struct device *parent,
+			     dev_t devt, void *drvdata, const char *fmt, ...)
+{
+	va_list vargs;
+	struct device *dev;
+
+	va_start(vargs, fmt);
+	dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
+	va_end(vargs);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(device_create);
+
+/**
+ * device_create_with_groups - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @groups: NULL-terminated list of attribute groups to be created
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes.  A struct device
+ * will be created in sysfs, registered to the specified class.
+ * Additional attributes specified in the groups parameter will also
+ * be created automatically.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_with_groups(struct class *class,
+					 struct device *parent, dev_t devt,
+					 void *drvdata,
+					 const struct attribute_group **groups,
+					 const char *fmt, ...)
+{
+	va_list vargs;
+	struct device *dev;
+
+	va_start(vargs, fmt);
+	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
+					 fmt, vargs);
+	va_end(vargs);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(device_create_with_groups);
+
+static int __match_devt(struct device *dev, const void *data)
+{
+	const dev_t *devt = data;
+
+	return dev->devt == *devt;
+}
+
+/**
+ * device_destroy - removes a device that was created with device_create()
+ * @class: pointer to the struct class that this device was registered with
+ * @devt: the dev_t of the device that was previously registered
+ *
+ * This call unregisters and cleans up a device that was created with a
+ * call to device_create().
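+ *
+ * Continuing the illustrative device_create() sketch above, teardown
+ * would be:
+ *
+ *	device_destroy(foo_class, MKDEV(foo_major, 0));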
+ */
+void device_destroy(struct class *class, dev_t devt)
+{
+	struct device *dev;
+
+	dev = class_find_device(class, NULL, &devt, __match_devt);
+	if (dev) {
+		put_device(dev);
+		device_unregister(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(device_destroy);
+
+/**
+ * device_rename - renames a device
+ * @dev: the pointer to the struct device to be renamed
+ * @new_name: the new name of the device
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of device_rename
+ * on the same device to ensure that new_name is valid and
+ * won't conflict with other devices.
+ *
+ * Note: Don't call this function.  Currently, the networking layer calls this
+ * function, but that will change.  The following text from Kay Sievers offers
+ * some insight:
+ *
+ * Renaming devices is racy at many levels, symlinks and other stuff are not
+ * replaced atomically, and you get a "move" uevent, but it's not easy to
+ * connect the event to the old and new device. Device nodes are not renamed at
+ * all, there isn't even support for that in the kernel now.
+ *
+ * In the meantime, during renaming, your target name might be taken by another
+ * driver, creating conflicts. Or the old name is taken directly after you
+ * renamed it -- then you get events for the same DEVPATH, before you even see
+ * the "move" event. It's just a mess, and nothing new should ever rely on
+ * kernel device renaming. Besides that, it's not even implemented now for
+ * other things than (driver-core wise very simple) network devices.
+ *
+ * We are currently about to change network renaming in udev to completely
+ * disallow renaming of devices in the same namespace as the kernel uses,
+ * because we can't solve the problems properly, that arise with swapping names
+ * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
+ * be allowed to some other name than eth[0-9]*, for the aforementioned
+ * reasons.
+ *
+ * Make up a "real" name in the driver before you register anything, or add
+ * some other attributes for userspace to find the device, or use udev to add
+ * symlinks -- but never rename kernel devices later, it's a complete mess. We
+ * don't even want to get into that and try to implement the missing pieces in
+ * the core. We really have other pieces to fix in the driver core mess. :)
+ */
+int device_rename(struct device *dev, const char *new_name)
+{
+	struct kobject *kobj = &dev->kobj;
+	char *old_device_name = NULL;
+	int error;
+
+	dev = get_device(dev);
+	if (!dev)
+		return -EINVAL;
+
+	dev_dbg(dev, "renaming to %s\n", new_name);
+
+	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
+	if (!old_device_name) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	if (dev->class) {
+		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
+					     kobj, old_device_name,
+					     new_name, kobject_namespace(kobj));
+		if (error)
+			goto out;
+	}
+
+	error = kobject_rename(kobj, new_name);
+	if (error)
+		goto out;
+
+out:
+	put_device(dev);
+
+	kfree(old_device_name);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_rename);
+
+static int device_move_class_links(struct device *dev,
+				   struct device *old_parent,
+				   struct device *new_parent)
+{
+	int error = 0;
+
+	if (old_parent)
+		sysfs_remove_link(&dev->kobj, "device");
+	if (new_parent)
+		error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
+					  "device");
+	return error;
+}
+
+/**
+ * device_move - moves a device to a new parent
+ * @dev: the pointer to the struct device to be moved
+ * @new_parent: the new parent of the device (can be NULL)
+ * @dpm_order: how to reorder the dpm_list
+ */
+int device_move(struct device *dev, struct device *new_parent,
+		enum dpm_order dpm_order)
+{
+	int error;
+	struct device *old_parent;
+	struct kobject *new_parent_kobj;
+
+	dev = get_device(dev);
+	if (!dev)
+		return -EINVAL;
+
+	device_pm_lock();
+	new_parent = get_device(new_parent);
+	new_parent_kobj = get_device_parent(dev, new_parent);
+	if (IS_ERR(new_parent_kobj)) {
+		error = PTR_ERR(new_parent_kobj);
+		put_device(new_parent);
+		goto out;
+	}
+
+	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+	error = kobject_move(&dev->kobj, new_parent_kobj);
+	if (error) {
+		cleanup_glue_dir(dev, new_parent_kobj);
+		put_device(new_parent);
+		goto out;
+	}
+	old_parent = dev->parent;
+	dev->parent = new_parent;
+	if (old_parent)
+		klist_remove(&dev->p->knode_parent);
+	if (new_parent) {
+		klist_add_tail(&dev->p->knode_parent,
+			       &new_parent->p->klist_children);
+		set_dev_node(dev, dev_to_node(new_parent));
+	}
+
+	if (dev->class) {
+		error = device_move_class_links(dev, old_parent, new_parent);
+		if (error) {
+			/* We ignore errors on cleanup since we're hosed anyway... */
+			device_move_class_links(dev, new_parent, old_parent);
+			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
+				if (new_parent)
+					klist_remove(&dev->p->knode_parent);
+				dev->parent = old_parent;
+				if (old_parent) {
+					klist_add_tail(&dev->p->knode_parent,
+						       &old_parent->p->klist_children);
+					set_dev_node(dev, dev_to_node(old_parent));
+				}
+			}
+			cleanup_glue_dir(dev, new_parent_kobj);
+			put_device(new_parent);
+			goto out;
+		}
+	}
+	switch (dpm_order) {
+	case DPM_ORDER_NONE:
+		break;
+	case DPM_ORDER_DEV_AFTER_PARENT:
+		device_pm_move_after(dev, new_parent);
+		devices_kset_move_after(dev, new_parent);
+		break;
+	case DPM_ORDER_PARENT_BEFORE_DEV:
+		device_pm_move_before(new_parent, dev);
+		devices_kset_move_before(new_parent, dev);
+		break;
+	case DPM_ORDER_DEV_LAST:
+		device_pm_move_last(dev);
+		devices_kset_move_last(dev);
+		break;
+	}
+
+	put_device(old_parent);
+out:
+	device_pm_unlock();
+	put_device(dev);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_move);
+
+/**
+ * device_shutdown - call ->shutdown() on each device during system shutdown.
+ */
+void device_shutdown(void)
+{
+	struct device *dev, *parent;
+
+	wait_for_device_probe();
+	device_block_probing();
+
+	spin_lock(&devices_kset->list_lock);
+	/*
+	 * Walk the devices list backward, shutting down each in turn.
+	 * Beware that device unplug events may also start pulling
+	 * devices offline, even as the system is shutting down.
+	 */
+	while (!list_empty(&devices_kset->list)) {
+		dev = list_entry(devices_kset->list.prev, struct device,
+				kobj.entry);
+
+		/*
+		 * Hold a reference to the device's parent so it
+		 * cannot be freed while the parent's lock is held
+		 * below.
+		 */
+		parent = get_device(dev->parent);
+		get_device(dev);
+		/*
+		 * Make sure the device is off the kset list, in the
+		 * event that dev->*->shutdown() doesn't remove it.
+		 */
+		list_del_init(&dev->kobj.entry);
+		spin_unlock(&devices_kset->list_lock);
+
+		/* hold lock to avoid race with probe/release */
+		if (parent)
+			device_lock(parent);
+		device_lock(dev);
+
+		/* Don't allow any more runtime suspends */
+		pm_runtime_get_noresume(dev);
+		pm_runtime_barrier(dev);
+
+		if (dev->class && dev->class->shutdown_pre) {
+			if (initcall_debug)
+				dev_info(dev, "shutdown_pre\n");
+			dev->class->shutdown_pre(dev);
+		}
+		if (dev->bus && dev->bus->shutdown) {
+			if (initcall_debug)
+				dev_info(dev, "shutdown\n");
+			dev->bus->shutdown(dev);
+		} else if (dev->driver && dev->driver->shutdown) {
+			if (initcall_debug)
+				dev_info(dev, "shutdown\n");
+			dev->driver->shutdown(dev);
+		}
+
+		device_unlock(dev);
+		if (parent)
+			device_unlock(parent);
+
+		put_device(dev);
+		put_device(parent);
+
+		spin_lock(&devices_kset->list_lock);
+	}
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/*
+ * Device logging functions
+ */
+
+#ifdef CONFIG_PRINTK
+static int
+create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
+{
+	const char *subsys;
+	size_t pos = 0;
+
+	if (dev->class)
+		subsys = dev->class->name;
+	else if (dev->bus)
+		subsys = dev->bus->name;
+	else
+		return 0;
+
+	pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
+	if (pos >= hdrlen)
+		goto overflow;
+
+	/*
+	 * Add device identifier DEVICE=:
+	 *   b12:8         block dev_t
+	 *   c127:3        char dev_t
+	 *   n8            netdev ifindex
+	 *   +sound:card0  subsystem:devname
+	 */
+	if (MAJOR(dev->devt)) {
+		char c;
+
+		if (strcmp(subsys, "block") == 0)
+			c = 'b';
+		else
+			c = 'c';
+		pos++;
+		pos += snprintf(hdr + pos, hdrlen - pos,
+				"DEVICE=%c%u:%u",
+				c, MAJOR(dev->devt), MINOR(dev->devt));
+	} else if (strcmp(subsys, "net") == 0) {
+		struct net_device *net = to_net_dev(dev);
+
+		pos++;
+		pos += snprintf(hdr + pos, hdrlen - pos,
+				"DEVICE=n%u", net->ifindex);
+	} else {
+		pos++;
+		pos += snprintf(hdr + pos, hdrlen - pos,
+				"DEVICE=+%s:%s", subsys, dev_name(dev));
+	}
+
+	if (pos >= hdrlen)
+		goto overflow;
+
+	return pos;
+
+overflow:
+	dev_WARN(dev, "device/subsystem name too long");
+	return 0;
+}
+
+int dev_vprintk_emit(int level, const struct device *dev,
+		     const char *fmt, va_list args)
+{
+	char hdr[128];
+	size_t hdrlen;
+
+	hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
+
+	return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
+}
+EXPORT_SYMBOL(dev_vprintk_emit);
+
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+
+	r = dev_vprintk_emit(level, dev, fmt, args);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(dev_printk_emit);
+
+static void __dev_printk(const char *level, const struct device *dev,
+			struct va_format *vaf)
+{
+	if (dev)
+		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
+				dev_driver_string(dev), dev_name(dev), vaf);
+	else
+		printk("%s(NULL device *): %pV", level, vaf);
+}
+
+void dev_printk(const char *level, const struct device *dev,
+		const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	__dev_printk(level, dev, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(dev_printk);
+
+#define define_dev_printk_level(func, kern_level)		\
+void func(const struct device *dev, const char *fmt, ...)	\
+{								\
+	struct va_format vaf;					\
+	va_list args;						\
+								\
+	va_start(args, fmt);					\
+								\
+	vaf.fmt = fmt;						\
+	vaf.va = &args;						\
+								\
+	__dev_printk(kern_level, dev, &vaf);			\
+								\
+	va_end(args);						\
+}								\
+EXPORT_SYMBOL(func);
+
+define_dev_printk_level(_dev_emerg, KERN_EMERG);
+define_dev_printk_level(_dev_alert, KERN_ALERT);
+define_dev_printk_level(_dev_crit, KERN_CRIT);
+define_dev_printk_level(_dev_err, KERN_ERR);
+define_dev_printk_level(_dev_warn, KERN_WARNING);
+define_dev_printk_level(_dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_info, KERN_INFO);
+
+#endif
+
+static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
+{
+	return fwnode && !IS_ERR(fwnode->secondary);
+}
+
+/**
+ * set_primary_fwnode - Change the primary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New primary firmware node of the device.
+ *
+ * Set the device's firmware node pointer to @fwnode, but if a secondary
+ * firmware node of the device is present, preserve it.
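+ *
+ * A sketch of the intended pairing (acpi_node and swnode are illustrative
+ * names):
+ *
+ *	set_secondary_fwnode(dev, swnode);
+ *	set_primary_fwnode(dev, acpi_node);	(swnode is kept as secondary)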
+ */
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
+	if (fwnode) {
+		struct fwnode_handle *fn = dev->fwnode;
+
+		if (fwnode_is_primary(fn))
+			fn = fn->secondary;
+
+		if (fn) {
+			WARN_ON(fwnode->secondary);
+			fwnode->secondary = fn;
+		}
+		dev->fwnode = fwnode;
+	} else {
+		dev->fwnode = fwnode_is_primary(dev->fwnode) ?
+			dev->fwnode->secondary : NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(set_primary_fwnode);
+
+/**
+ * set_secondary_fwnode - Change the secondary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New secondary firmware node of the device.
+ *
+ * If a primary firmware node of the device is present, set its secondary
+ * pointer to @fwnode.  Otherwise, set the device's firmware node pointer to
+ * @fwnode.
+ */
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
+	if (fwnode)
+		fwnode->secondary = ERR_PTR(-ENODEV);
+
+	if (fwnode_is_primary(dev->fwnode))
+		dev->fwnode->secondary = fwnode;
+	else
+		dev->fwnode = fwnode;
+}
+
+/**
+ * device_set_of_node_from_dev - reuse device-tree node of another device
+ * @dev: device whose device-tree node is being set
+ * @dev2: device whose device-tree node is being reused
+ *
+ * Takes another reference to the new device-tree node after first dropping
+ * any reference held to the old node.
+ */
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
+{
+	of_node_put(dev->of_node);
+	dev->of_node = of_node_get(dev2->of_node);
+	dev->of_node_reused = true;
+}
+EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
new file mode 100644
index 0000000..eb9443d
--- /dev/null
+++ b/drivers/base/cpu.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CPU subsystem support
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/device.h>
+#include <linux/node.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/cpufeature.h>
+#include <linux/tick.h>
+#include <linux/pm_qos.h>
+#include <linux/sched/isolation.h>
+
+#include "base.h"
+
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
+
+static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+{
+	/* ACPI style match is the only one that may succeed. */
+	if (acpi_driver_match_device(dev, drv))
+		return 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void change_cpu_under_node(struct cpu *cpu,
+			unsigned int from_nid, unsigned int to_nid)
+{
+	int cpuid = cpu->dev.id;
+	unregister_cpu_under_node(cpuid, from_nid);
+	register_cpu_under_node(cpuid, to_nid);
+	cpu->node_id = to_nid;
+}
+
+static int cpu_subsys_online(struct device *dev)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int cpuid = dev->id;
+	int from_nid, to_nid;
+	int ret;
+
+	from_nid = cpu_to_node(cpuid);
+	if (from_nid == NUMA_NO_NODE)
+		return -ENODEV;
+
+	ret = cpu_up(cpuid);
+	/*
+	 * When hot adding memory to memoryless node and enabling a cpu
+	 * on the node, node number of the cpu may internally change.
+	 */
+	to_nid = cpu_to_node(cpuid);
+	if (from_nid != to_nid)
+		change_cpu_under_node(cpu, from_nid, to_nid);
+
+	return ret;
+}
+
+static int cpu_subsys_offline(struct device *dev)
+{
+	return cpu_down(dev->id);
+}
+
+void unregister_cpu(struct cpu *cpu)
+{
+	int logical_cpu = cpu->dev.id;
+
+	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
+
+	device_unregister(&cpu->dev);
+	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
+	return;
+}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+static ssize_t cpu_probe_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	ssize_t cnt;
+	int ret;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	cnt = arch_cpu_probe(buf, count);
+
+	unlock_device_hotplug();
+	return cnt;
+}
+
+static ssize_t cpu_release_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	ssize_t cnt;
+	int ret;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	cnt = arch_cpu_release(buf, count);
+
+	unlock_device_hotplug();
+	return cnt;
+}
+
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct bus_type cpu_subsys = {
+	.name = "cpu",
+	.dev_name = "cpu",
+	.match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+	.online = cpu_subsys_online,
+	.offline = cpu_subsys_offline,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
+#ifdef CONFIG_KEXEC
+#include <linux/kexec.h>
+
+static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	unsigned long long addr;
+	int cpunum;
+
+	cpunum = cpu->dev.id;
+
+	/*
+	 * Might be reading another cpu's data based on which cpu the read
+	 * thread has been scheduled on. But cpu data (memory) is allocated
+	 * once during boot and does not change thereafter. Hence this
+	 * operation should be safe. No locking required.
+	 */
+	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
+	rc = sprintf(buf, "%Lx\n", addr);
+	return rc;
+}
+static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+
+static ssize_t show_crash_notes_size(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	ssize_t rc;
+
+	rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
+	return rc;
+}
+static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
+
+static struct attribute *crash_note_cpu_attrs[] = {
+	&dev_attr_crash_notes.attr,
+	&dev_attr_crash_notes_size.attr,
+	NULL
+};
+
+static struct attribute_group crash_note_cpu_attr_group = {
+	.attrs = crash_note_cpu_attrs,
+};
+#endif
+
+static const struct attribute_group *common_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+	&crash_note_cpu_attr_group,
+#endif
+	NULL
+};
+
+static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+	&crash_note_cpu_attr_group,
+#endif
+	NULL
+};
+
+/*
+ * Print cpu online, possible, present, and system maps
+ */
+
+struct cpu_attr {
+	struct device_attribute attr;
+	const struct cpumask *const map;
+};
+
+static ssize_t show_cpus_attr(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
+
+	return cpumap_print_to_pagebuf(true, buf, ca->map);
+}
+
+#define _CPU_ATTR(name, map) \
+	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }
+
+/* Keep in sync with cpu_subsys_attrs */
+static struct cpu_attr cpu_attrs[] = {
+	_CPU_ATTR(online, &__cpu_online_mask),
+	_CPU_ATTR(possible, &__cpu_possible_mask),
+	_CPU_ATTR(present, &__cpu_present_mask),
+};
+
+/*
+ * Print values for NR_CPUS and offlined cpus
+ */
+static ssize_t print_cpus_kernel_max(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
+	return n;
+}
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+
+/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
+unsigned int total_cpus;
+
+static ssize_t print_cpus_offline(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int n = 0, len = PAGE_SIZE-2;
+	cpumask_var_t offline;
+
+	/* display offline cpus < nr_cpu_ids */
+	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
+	n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
+	free_cpumask_var(offline);
+
+	/* display offline cpus >= nr_cpu_ids */
+	if (total_cpus && nr_cpu_ids < total_cpus) {
+		if (n && n < len)
+			buf[n++] = ',';
+
+		if (nr_cpu_ids == total_cpus-1)
+			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
+		else
+			n += snprintf(&buf[n], len - n, "%u-%d",
+						      nr_cpu_ids, total_cpus-1);
+	}
+
+	n += snprintf(&buf[n], len - n, "\n");
+	return n;
+}
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+
+static ssize_t print_cpus_isolated(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int n = 0, len = PAGE_SIZE-2;
+	cpumask_var_t isolated;
+
+	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_andnot(isolated, cpu_possible_mask,
+		       housekeeping_cpumask(HK_FLAG_DOMAIN));
+	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+
+	free_cpumask_var(isolated);
+
+	return n;
+}
+static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+
+#ifdef CONFIG_NO_HZ_FULL
+static ssize_t print_cpus_nohz_full(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int n = 0, len = PAGE_SIZE-2;
+
+	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+
+	return n;
+}
+static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+#endif
+
+static void cpu_device_release(struct device *dev)
+{
+	/*
+	 * This is an empty function to prevent the driver core from spitting a
+	 * warning at us.  Yes, I know this is directly opposite of what the
+	 * documentation for the driver core and kobjects say, and the author
+	 * of this code has already been publicly ridiculed for doing
+	 * something as foolish as this.  However, at this point in time, it is
+	 * the only way to handle the issue of statically allocated cpu
+	 * devices.  The different architectures will have their cpu device
+	 * code reworked to properly handle this in the near future, so this
+	 * function will then be changed to correctly free up the memory held
+	 * by the cpu device.
+	 *
+	 * Never copy this way of doing things, or you too will be made fun of
+	 * on the linux-kernel list, you have been warned.
+	 */
+}
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	ssize_t n;
+	u32 i;
+
+	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+		    CPU_FEATURE_TYPEVAL);
+
+	for (i = 0; i < MAX_CPU_FEATURES; i++)
+		if (cpu_have_feature(i)) {
+			if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+				WARN(1, "CPU features overflow page\n");
+				break;
+			}
+			n += sprintf(&buf[n], ",%04X", i);
+		}
+	buf[n++] = '\n';
+	return n;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf) {
+		print_cpu_modalias(NULL, NULL, buf);
+		add_uevent_var(env, "MODALIAS=%s", buf);
+		kfree(buf);
+	}
+	return 0;
+}
+#endif
+
+/*
+ * register_cpu - Setup a sysfs device for a CPU.
+ * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
+ *	  sysfs for this CPU.
+ * @num - CPU number to use when creating the device.
+ *
+ * Initialize and register the CPU device.
+ */
+int register_cpu(struct cpu *cpu, int num)
+{
+	int error;
+
+	cpu->node_id = cpu_to_node(num);
+	memset(&cpu->dev, 0x00, sizeof(struct device));
+	cpu->dev.id = num;
+	cpu->dev.bus = &cpu_subsys;
+	cpu->dev.release = cpu_device_release;
+	cpu->dev.offline_disabled = !cpu->hotpluggable;
+	cpu->dev.offline = !cpu_online(num);
+	cpu->dev.of_node = of_get_cpu_node(num, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+	cpu->dev.bus->uevent = cpu_uevent;
+#endif
+	cpu->dev.groups = common_cpu_attr_groups;
+	if (cpu->hotpluggable)
+		cpu->dev.groups = hotplugable_cpu_attr_groups;
+	error = device_register(&cpu->dev);
+	if (error) {
+		put_device(&cpu->dev);
+		return error;
+	}
+
+	per_cpu(cpu_sys_devices, num) = &cpu->dev;
+	register_cpu_under_node(num, cpu_to_node(num));
+	dev_pm_qos_expose_latency_limit(&cpu->dev,
+					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+
+	return 0;
+}
+
+struct device *get_cpu_device(unsigned cpu)
+{
+	if (cpu < nr_cpu_ids && cpu_possible(cpu))
+		return per_cpu(cpu_sys_devices, cpu);
+	else
+		return NULL;
+}
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static void device_create_release(struct device *dev)
+{
+	kfree(dev);
+}
+
+static struct device *
+__cpu_device_create(struct device *parent, void *drvdata,
+		    const struct attribute_group **groups,
+		    const char *fmt, va_list args)
+{
+	struct device *dev = NULL;
+	int retval = -ENODEV;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	device_initialize(dev);
+	dev->parent = parent;
+	dev->groups = groups;
+	dev->release = device_create_release;
+	dev_set_drvdata(dev, drvdata);
+
+	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+	if (retval)
+		goto error;
+
+	retval = device_add(dev);
+	if (retval)
+		goto error;
+
+	return dev;
+
+error:
+	put_device(dev);
+	return ERR_PTR(retval);
+}
+
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+				 const struct attribute_group **groups,
+				 const char *fmt, ...)
+{
+	va_list vargs;
+	struct device *dev;
+
+	va_start(vargs, fmt);
+	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
+	va_end(vargs);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(cpu_device_create);
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
+#endif
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+	&dev_attr_probe.attr,
+	&dev_attr_release.attr,
+#endif
+	&cpu_attrs[0].attr.attr,
+	&cpu_attrs[1].attr.attr,
+	&cpu_attrs[2].attr.attr,
+	&dev_attr_kernel_max.attr,
+	&dev_attr_offline.attr,
+	&dev_attr_isolated.attr,
+#ifdef CONFIG_NO_HZ_FULL
+	&dev_attr_nohz_full.attr,
+#endif
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+	&dev_attr_modalias.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group cpu_root_attr_group = {
+	.attrs = cpu_root_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+	&cpu_root_attr_group,
+	NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
+static void __init cpu_dev_register_generic(void)
+{
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+	int i;
+
+	for_each_possible_cpu(i) {
+		if (register_cpu(&per_cpu(cpu_devices, i), i))
+			panic("Failed to register CPU device");
+	}
+#endif
+}
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+	&dev_attr_meltdown.attr,
+	&dev_attr_spectre_v1.attr,
+	&dev_attr_spectre_v2.attr,
+	&dev_attr_spec_store_bypass.attr,
+	&dev_attr_l1tf.attr,
+	NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+	.name  = "vulnerabilities",
+	.attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+			       &cpu_root_vulnerabilities_group))
+		pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
+void __init cpu_dev_init(void)
+{
+	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
+		panic("Failed to register CPU subsystem");
+
+	cpu_dev_register_generic();
+	cpu_register_vulnerabilities();
+}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
new file mode 100644
index 0000000..edfc9f0
--- /dev/null
+++ b/drivers/base/dd.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/dd.c - The core device/driver interactions.
+ *
+ * This file contains the (sometimes tricky) code that controls the
+ * interactions between devices and drivers, which primarily includes
+ * driver binding and unbinding.
+ *
+ * All of this code used to exist in drivers/base/bus.c, but was
+ * relocated to here in the name of compartmentalization (since it wasn't
+ * strictly code just for the 'struct bus_type').
+ *
+ * Copyright (c) 2002-5 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007-2009 Novell Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/async.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
+
+#include "base.h"
+#include "power/power.h"
+
+/*
+ * Deferred Probe infrastructure.
+ *
+ * Sometimes driver probe order matters, but the kernel doesn't always have
+ * dependency information, which means some drivers will get probed before a
+ * resource they depend on is available.  For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized.  If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its probe
+ * hook.
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list.  A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_list
+ * or the (struct device*)->p->deferred_probe pointers are manipulated.
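+ *
+ * A driver-side sketch of requesting deferral; gpiod_get() stands in for
+ * any resource lookup, and the foo_* names are illustrative:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct gpio_desc *reset;
+ *
+ *		reset = gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ *		if (IS_ERR(reset))
+ *			return PTR_ERR(reset);	(may be -EPROBE_DEFER)
+ *		...
+ *	}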
+ */
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static struct dentry *deferred_devices;
+static bool initcalls_done;
+
+/*
+ * In some cases, like suspend to RAM or hibernation, it might be reasonable
+ * to prohibit probing of devices as it could be unsafe.
+ * Once defer_all_probes is true, all driver probes will be forcibly deferred.
+ */
+static bool defer_all_probes;
+
+/*
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+	struct device *dev;
+	struct device_private *private;
+	/*
+	 * This block processes every device in the deferred 'active' list.
+	 * Each device is removed from the active list and passed to
+	 * bus_probe_device() to re-attempt the probe.  The loop continues
+	 * until every device in the active list is removed and retried.
+	 *
+	 * Note: Once the device is removed from the list and the mutex is
+	 * released, it is possible for the device to get freed by another
+	 * thread and cause an illegal pointer dereference.  This code uses
+	 * get/put_device() to ensure the device structure cannot disappear
+	 * from under our feet.
+	 */
+	mutex_lock(&deferred_probe_mutex);
+	while (!list_empty(&deferred_probe_active_list)) {
+		private = list_first_entry(&deferred_probe_active_list,
+					typeof(*dev->p), deferred_probe);
+		dev = private->device;
+		list_del_init(&private->deferred_probe);
+
+		get_device(dev);
+
+		/*
+		 * Drop the mutex while probing each device; the probe path may
+		 * manipulate the deferred list
+		 */
+		mutex_unlock(&deferred_probe_mutex);
+
+		/*
+		 * Force the device to the end of the dpm_list since
+		 * the PM code assumes that the order we add things to
+		 * the list is a good order for suspend but deferred
+		 * probe makes that very unsafe.
+		 */
+		device_pm_move_to_tail(dev);
+
+		dev_dbg(dev, "Retrying from deferred list\n");
+		bus_probe_device(dev);
+		mutex_lock(&deferred_probe_mutex);
+
+		put_device(dev);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+static void driver_deferred_probe_add(struct device *dev)
+{
+	mutex_lock(&deferred_probe_mutex);
+	if (list_empty(&dev->p->deferred_probe)) {
+		dev_dbg(dev, "Added to deferred list\n");
+		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+	mutex_lock(&deferred_probe_mutex);
+	if (!list_empty(&dev->p->deferred_probe)) {
+		dev_dbg(dev, "Removed from deferred list\n");
+		list_del_init(&dev->p->deferred_probe);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable = false;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them.  It
+ * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
+ */
+static void driver_deferred_probe_trigger(void)
+{
+	if (!driver_deferred_probe_enable)
+		return;
+
+	/*
+	 * A successful probe means that all the devices in the pending list
+	 * should be triggered to be reprobed.  Move all the deferred devices
+	 * into the active list so they can be retried by the workqueue
+	 */
+	mutex_lock(&deferred_probe_mutex);
+	atomic_inc(&deferred_trigger_count);
+	list_splice_tail_init(&deferred_probe_pending_list,
+			      &deferred_probe_active_list);
+	mutex_unlock(&deferred_probe_mutex);
+
+	/*
+	 * Kick the re-probe thread.  It may already be scheduled, but it is
+	 * safe to kick it again.
+	 */
+	schedule_work(&deferred_probe_work);
+}
+
+/**
+ * device_block_probing() - Block/defer device's probes
+ *
+ *	It will disable probing of devices and defer their probes instead.
+ */
+void device_block_probing(void)
+{
+	defer_all_probes = true;
+	/* sync with probes to avoid races. */
+	wait_for_device_probe();
+}
+
+/**
+ * device_unblock_probing() - Unblock/enable device's probes
+ *
+ *	It will restore normal behavior and trigger re-probing of deferred
+ * devices.
+ */
+void device_unblock_probing(void)
+{
+	defer_all_probes = false;
+	driver_deferred_probe_trigger();
+}
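+
+/*
+ * Illustrative sketch (not part of the original file): a caller such as
+ * the system suspend path can fence off probing around a critical
+ * section with this pair:
+ *
+ *	device_block_probing();
+ *	...no probes may run here; new ones are deferred...
+ *	device_unblock_probing();
+ */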
+
+/*
+ * deferred_devs_show() - Show the devices in the deferred probe pending list.
+ */
+static int deferred_devs_show(struct seq_file *s, void *data)
+{
+	struct device_private *curr;
+
+	mutex_lock(&deferred_probe_mutex);
+
+	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
+		seq_printf(s, "%s\n", dev_name(curr->device));
+
+	mutex_unlock(&deferred_probe_mutex);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(deferred_devs);
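+
+/*
+ * Usage sketch (not part of the original file): with debugfs mounted, the
+ * "devices_deferred" file created in deferred_probe_initcall() below lets
+ * userspace list devices still waiting on a probe:
+ *
+ *	# cat /sys/kernel/debug/devices_deferred
+ */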
+
+static int deferred_probe_timeout = -1;
+static int __init deferred_probe_timeout_setup(char *str)
+{
+	deferred_probe_timeout = simple_strtol(str, NULL, 10);
+	return 1;
+}
+__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Returns -ENODEV if init is done and all built-in drivers have had a chance
+ * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
+ * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
+ *
+ * Drivers or subsystems can opt in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+	if (initcalls_done) {
+		if (!deferred_probe_timeout) {
+			dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+			return -ETIMEDOUT;
+		}
+		dev_warn(dev, "ignoring dependency for device, assuming no driver");
+		return -ENODEV;
+	}
+	return -EPROBE_DEFER;
+}
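+
+/*
+ * Illustrative sketch (foo_find_supply() is hypothetical, not part of the
+ * original file): a resource lookup might use this helper instead of
+ * returning -EPROBE_DEFER unconditionally, so a permanently missing
+ * supplier stops blocking the probe once initcalls are done:
+ *
+ *	supply = foo_find_supply(dev);
+ *	if (!supply)
+ *		return driver_deferred_probe_check_state(dev);
+ */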
+
+static void deferred_probe_timeout_work_func(struct work_struct *work)
+{
+	struct device_private *private, *p;
+
+	deferred_probe_timeout = 0;
+	driver_deferred_probe_trigger();
+	flush_work(&deferred_probe_work);
+
+	/* hold the mutex: the pending list may still be manipulated */
+	mutex_lock(&deferred_probe_mutex);
+	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
+		dev_info(private->device, "deferred probe pending");
+	mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+
+/**
+ * deferred_probe_initcall() - Enable probing of deferred devices
+ *
+ * We don't want to get in the way when the bulk of drivers are getting probed.
+ * Instead, this initcall makes sure that deferred probing is delayed until
+ * late_initcall time.
+ */
+static int deferred_probe_initcall(void)
+{
+	deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
+					       NULL, &deferred_devs_fops);
+
+	driver_deferred_probe_enable = true;
+	driver_deferred_probe_trigger();
+	/* Sort as many dependencies as possible before exiting initcalls */
+	flush_work(&deferred_probe_work);
+	initcalls_done = true;
+
+	/*
+	 * Trigger deferred probe again, this time we won't defer anything
+	 * that is optional
+	 */
+	driver_deferred_probe_trigger();
+	flush_work(&deferred_probe_work);
+
+	if (deferred_probe_timeout > 0) {
+		schedule_delayed_work(&deferred_probe_timeout_work,
+			deferred_probe_timeout * HZ);
+	}
+	return 0;
+}
+late_initcall(deferred_probe_initcall);
+
+static void __exit deferred_probe_exit(void)
+{
+	debugfs_remove_recursive(deferred_devices);
+}
+__exitcall(deferred_probe_exit);
+
+/**
+ * device_is_bound() - Check if device is bound to a driver
+ * @dev: device to check
+ *
+ * Returns true if passed device has already finished probing successfully
+ * against a driver.
+ *
+ * This function must be called with the device lock held.
+ */
+bool device_is_bound(struct device *dev)
+{
+	return dev->p && klist_node_attached(&dev->p->knode_driver);
+}
+
+static void driver_bound(struct device *dev)
+{
+	if (device_is_bound(dev)) {
+		printk(KERN_WARNING "%s: device %s already bound\n",
+			__func__, kobject_name(&dev->kobj));
+		return;
+	}
+
+	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
+		 __func__, dev_name(dev));
+
+	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+	device_links_driver_bound(dev);
+
+	device_pm_check_callbacks(dev);
+
+	/*
+	 * Make sure the device is no longer in one of the deferred lists and
+	 * kick off retrying all pending devices
+	 */
+	driver_deferred_probe_del(dev);
+	driver_deferred_probe_trigger();
+
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_BOUND_DRIVER, dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_BIND);
+}
+
+static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	device_lock(dev);
+	dev->driver->coredump(dev);
+	device_unlock(dev);
+
+	return count;
+}
+static DEVICE_ATTR_WO(coredump);
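+
+/*
+ * Usage sketch (hypothetical device path, not part of the original file):
+ * for a bound driver that implements ->coredump(), writing anything to
+ * the per-device "coredump" attribute triggers a dump:
+ *
+ *	# echo 1 > /sys/bus/pci/devices/0000:01:00.0/coredump
+ */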
+
+static int driver_sysfs_add(struct device *dev)
+{
+	int ret;
+
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_BIND_DRIVER, dev);
+
+	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
+				kobject_name(&dev->kobj));
+	if (ret)
+		goto fail;
+
+	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
+				"driver");
+	if (ret)
+		goto rm_dev;
+
+	if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
+	    !device_create_file(dev, &dev_attr_coredump))
+		return 0;
+
+	sysfs_remove_link(&dev->kobj, "driver");
+
+rm_dev:
+	sysfs_remove_link(&dev->driver->p->kobj,
+			  kobject_name(&dev->kobj));
+
+fail:
+	return ret;
+}
+
+static void driver_sysfs_remove(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv) {
+		if (drv->coredump)
+			device_remove_file(dev, &dev_attr_coredump);
+		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
+		sysfs_remove_link(&dev->kobj, "driver");
+	}
+}
+
+/**
+ * device_bind_driver - bind a driver to one device.
+ * @dev: device.
+ *
+ * Allow manual attachment of a driver to a device.
+ * Caller must have already set @dev->driver.
+ *
+ * Note that this does not modify the bus reference count
+ * nor take the bus's rwsem. Please verify those are accounted
+ * for before calling this. (It is ok to call with no other effort
+ * from a driver's probe() method.)
+ *
+ * This function must be called with the device lock held.
+ */
+int device_bind_driver(struct device *dev)
+{
+	int ret;
+
+	ret = driver_sysfs_add(dev);
+	if (!ret)
+		driver_bound(dev);
+	else if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_bind_driver);
+
+static atomic_t probe_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+
+static void driver_deferred_probe_add_trigger(struct device *dev,
+					      int local_trigger_count)
+{
+	driver_deferred_probe_add(dev);
+	/* Did a trigger occur while probing? Need to re-trigger if yes */
+	if (local_trigger_count != atomic_read(&deferred_trigger_count))
+		driver_deferred_probe_trigger();
+}
+
+static int really_probe(struct device *dev, struct device_driver *drv)
+{
+	int ret = -EPROBE_DEFER;
+	int local_trigger_count = atomic_read(&deferred_trigger_count);
+	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+			   !drv->suppress_bind_attrs;
+
+	if (defer_all_probes) {
+		/*
+		 * Value of defer_all_probes can be set only by
+		 * device_block_probing() which, in turn, will call
+		 * wait_for_device_probe() right after that to avoid any races.
+		 */
+		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
+		driver_deferred_probe_add(dev);
+		return ret;
+	}
+
+	ret = device_links_check_suppliers(dev);
+	if (ret == -EPROBE_DEFER)
+		driver_deferred_probe_add_trigger(dev, local_trigger_count);
+	if (ret)
+		return ret;
+
+	atomic_inc(&probe_count);
+	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
+		 drv->bus->name, __func__, drv->name, dev_name(dev));
+	WARN_ON(!list_empty(&dev->devres_head));
+
+re_probe:
+	dev->driver = drv;
+
+	/* If using pinctrl, bind pins now before probing */
+	ret = pinctrl_bind_pins(dev);
+	if (ret)
+		goto pinctrl_bind_failed;
+
+	ret = dma_configure(dev);
+	if (ret)
+		goto dma_failed;
+
+	if (driver_sysfs_add(dev)) {
+		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
+			__func__, dev_name(dev));
+		goto probe_failed;
+	}
+
+	if (dev->pm_domain && dev->pm_domain->activate) {
+		ret = dev->pm_domain->activate(dev);
+		if (ret)
+			goto probe_failed;
+	}
+
+	if (dev->bus->probe) {
+		ret = dev->bus->probe(dev);
+		if (ret)
+			goto probe_failed;
+	} else if (drv->probe) {
+		ret = drv->probe(dev);
+		if (ret)
+			goto probe_failed;
+	}
+
+	if (test_remove) {
+		test_remove = false;
+
+		if (dev->bus->remove)
+			dev->bus->remove(dev);
+		else if (drv->remove)
+			drv->remove(dev);
+
+		devres_release_all(dev);
+		driver_sysfs_remove(dev);
+		dev->driver = NULL;
+		dev_set_drvdata(dev, NULL);
+		if (dev->pm_domain && dev->pm_domain->dismiss)
+			dev->pm_domain->dismiss(dev);
+		pm_runtime_reinit(dev);
+
+		goto re_probe;
+	}
+
+	pinctrl_init_done(dev);
+
+	if (dev->pm_domain && dev->pm_domain->sync)
+		dev->pm_domain->sync(dev);
+
+	driver_bound(dev);
+	ret = 1;
+	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
+		 drv->bus->name, __func__, dev_name(dev), drv->name);
+	goto done;
+
+probe_failed:
+	dma_deconfigure(dev);
+dma_failed:
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+pinctrl_bind_failed:
+	device_links_no_driver(dev);
+	devres_release_all(dev);
+	driver_sysfs_remove(dev);
+	dev->driver = NULL;
+	dev_set_drvdata(dev, NULL);
+	if (dev->pm_domain && dev->pm_domain->dismiss)
+		dev->pm_domain->dismiss(dev);
+	pm_runtime_reinit(dev);
+	dev_pm_set_driver_flags(dev, 0);
+
+	switch (ret) {
+	case -EPROBE_DEFER:
+		/* Driver requested deferred probing */
+		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
+		driver_deferred_probe_add_trigger(dev, local_trigger_count);
+		break;
+	case -ENODEV:
+	case -ENXIO:
+		pr_debug("%s: probe of %s rejects match %d\n",
+			 drv->name, dev_name(dev), ret);
+		break;
+	default:
+		/* driver matched but the probe failed */
+		printk(KERN_WARNING
+		       "%s: probe of %s failed with error %d\n",
+		       drv->name, dev_name(dev), ret);
+	}
+	/*
+	 * Ignore errors returned by ->probe so that the next driver can try
+	 * its luck.
+	 */
+	ret = 0;
+done:
+	atomic_dec(&probe_count);
+	wake_up(&probe_waitqueue);
+	return ret;
+}
+
+/*
+ * For initcall_debug, show the driver probe time.
+ */
+static int really_probe_debug(struct device *dev, struct device_driver *drv)
+{
+	ktime_t calltime, delta, rettime;
+	int ret;
+
+	calltime = ktime_get();
+	ret = really_probe(dev, drv);
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+	       dev_name(dev), ret, (s64) ktime_to_us(delta));
+	return ret;
+}
+
+/**
+ * driver_probe_done
+ * Determine if the probe sequence is finished or not.
+ *
+ * Should somehow figure out how to use a semaphore, not an atomic variable...
+ */
+int driver_probe_done(void)
+{
+	pr_debug("%s: probe_count = %d\n", __func__,
+		 atomic_read(&probe_count));
+	if (atomic_read(&probe_count))
+		return -EBUSY;
+	return 0;
+}
+
+/**
+ * wait_for_device_probe
+ * Wait for device probing to be completed.
+ */
+void wait_for_device_probe(void)
+{
+	/* wait for the deferred probe workqueue to finish */
+	flush_work(&deferred_probe_work);
+
+	/* wait for the known devices to complete their probing */
+	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
+	async_synchronize_full();
+}
+EXPORT_SYMBOL_GPL(wait_for_device_probe);
+
+/**
+ * driver_probe_device - attempt to bind device & driver together
+ * @drv: driver to bind a device to
+ * @dev: device to try to bind to the driver
+ *
+ * This function returns -ENODEV if the device is not registered,
+ * 1 if the device is bound successfully and 0 otherwise.
+ *
+ * This function must be called with @dev lock held.  When called for a
+ * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
+ */
+int driver_probe_device(struct device_driver *drv, struct device *dev)
+{
+	int ret = 0;
+
+	if (!device_is_registered(dev))
+		return -ENODEV;
+
+	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
+		 drv->bus->name, __func__, dev_name(dev), drv->name);
+
+	pm_runtime_get_suppliers(dev);
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
+	pm_runtime_barrier(dev);
+	if (initcall_debug)
+		ret = really_probe_debug(dev, drv);
+	else
+		ret = really_probe(dev, drv);
+	pm_request_idle(dev);
+
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
+	pm_runtime_put_suppliers(dev);
+	return ret;
+}
+
+bool driver_allows_async_probing(struct device_driver *drv)
+{
+	switch (drv->probe_type) {
+	case PROBE_PREFER_ASYNCHRONOUS:
+		return true;
+
+	case PROBE_FORCE_SYNCHRONOUS:
+		return false;
+
+	default:
+		if (module_requested_async_probing(drv->owner))
+			return true;
+
+		return false;
+	}
+}
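+
+/*
+ * Illustrative sketch ("foo" names are hypothetical, not part of the
+ * original file): a driver opts in to asynchronous probing through the
+ * probe_type field of its struct device_driver:
+ *
+ *	static struct platform_driver foo_driver = {
+ *		.probe	= foo_probe,
+ *		.driver	= {
+ *			.name		= "foo",
+ *			.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+ *		},
+ *	};
+ */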
+
+struct device_attach_data {
+	struct device *dev;
+
+	/*
+	 * Indicates whether we are considering asynchronous probing or
+	 * not. Only initial binding after device or driver registration
+	 * (including deferral processing) may be done asynchronously, the
+	 * rest is always synchronous, as we expect it is being done by
+	 * request from userspace.
+	 */
+	bool check_async;
+
+	/*
+	 * Indicates if we are binding synchronous or asynchronous drivers.
+	 * When asynchronous probing is enabled we'll execute 2 passes
+	 * over drivers: the first pass doing synchronous probing and the
+	 * second doing asynchronous probing (if synchronous probing did
+	 * not succeed - most likely because there was no driver requiring
+	 * synchronous probing - and we found an asynchronous driver during
+	 * the first pass).  The 2 passes are done because we can't
+	 * schedule an asynchronous probe for a given device and driver
+	 * from bus_for_each_drv(), since the driver pointer is not
+	 * guaranteed to stay valid once bus_for_each_drv() iterates to
+	 * the next driver on the bus.
+	 */
+	bool want_async;
+
+	/*
+	 * We'll set have_async to 'true' if, while scanning for matching
+	 * driver, we'll encounter one that requests asynchronous probing.
+	 */
+	bool have_async;
+};
+
+static int __device_attach_driver(struct device_driver *drv, void *_data)
+{
+	struct device_attach_data *data = _data;
+	struct device *dev = data->dev;
+	bool async_allowed;
+	int ret;
+
+	/*
+	 * Check if device has already been claimed. This may
+	 * happen when driver loading, device discovery/registration,
+	 * and deferred probe processing all happen at once on
+	 * multiple threads.
+	 */
+	if (dev->driver)
+		return -EBUSY;
+
+	ret = driver_match_device(drv, dev);
+	if (ret == 0) {
+		/* no match */
+		return 0;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_dbg(dev, "Device match requests probe deferral\n");
+		driver_deferred_probe_add(dev);
+	} else if (ret < 0) {
+		dev_dbg(dev, "Bus failed to match device: %d", ret);
+		return ret;
+	} /* ret > 0 means positive match */
+
+	async_allowed = driver_allows_async_probing(drv);
+
+	if (async_allowed)
+		data->have_async = true;
+
+	if (data->check_async && async_allowed != data->want_async)
+		return 0;
+
+	return driver_probe_device(drv, dev);
+}
+
+static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
+{
+	struct device *dev = _dev;
+	struct device_attach_data data = {
+		.dev		= dev,
+		.check_async	= true,
+		.want_async	= true,
+	};
+
+	device_lock(dev);
+
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
+	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
+	dev_dbg(dev, "async probe completed\n");
+
+	pm_request_idle(dev);
+
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
+	device_unlock(dev);
+
+	put_device(dev);
+}
+
+static int __device_attach(struct device *dev, bool allow_async)
+{
+	int ret = 0;
+
+	device_lock(dev);
+	if (dev->driver) {
+		if (device_is_bound(dev)) {
+			ret = 1;
+			goto out_unlock;
+		}
+		ret = device_bind_driver(dev);
+		if (ret == 0)
+			ret = 1;
+		else {
+			dev->driver = NULL;
+			ret = 0;
+		}
+	} else {
+		struct device_attach_data data = {
+			.dev = dev,
+			.check_async = allow_async,
+			.want_async = false,
+		};
+
+		if (dev->parent)
+			pm_runtime_get_sync(dev->parent);
+
+		ret = bus_for_each_drv(dev->bus, NULL, &data,
+					__device_attach_driver);
+		if (!ret && allow_async && data.have_async) {
+			/*
+			 * If we could not find appropriate driver
+			 * synchronously and we are allowed to do
+			 * async probes and there are drivers that
+			 * want to probe asynchronously, we'll
+			 * try them.
+			 */
+			dev_dbg(dev, "scheduling asynchronous probe\n");
+			get_device(dev);
+			async_schedule(__device_attach_async_helper, dev);
+		} else {
+			pm_request_idle(dev);
+		}
+
+		if (dev->parent)
+			pm_runtime_put(dev->parent);
+	}
+out_unlock:
+	device_unlock(dev);
+	return ret;
+}
+
+/**
+ * device_attach - try to attach device to a driver.
+ * @dev: device.
+ *
+ * Walk the list of drivers that the bus has and call
+ * driver_probe_device() for each pair. If a compatible
+ * pair is found, break out and return.
+ *
+ * Returns 1 if the device was bound to a driver;
+ * 0 if no matching driver was found;
+ * -ENODEV if the device is not registered.
+ *
+ * When called for a USB interface, @dev->parent lock must be held.
+ */
+int device_attach(struct device *dev)
+{
+	return __device_attach(dev, false);
+}
+EXPORT_SYMBOL_GPL(device_attach);
+
+void device_initial_probe(struct device *dev)
+{
+	__device_attach(dev, true);
+}
+
+static int __driver_attach(struct device *dev, void *data)
+{
+	struct device_driver *drv = data;
+	int ret;
+
+	/*
+	 * Lock device and try to bind to it. We drop the error
+	 * here and always return 0, because we need to keep trying
+	 * to bind to devices and some drivers will return an error
+	 * simply because they do not support the device.
+	 *
+	 * driver_probe_device() will emit a warning if there
+	 * is an error.
+	 */
+
+	ret = driver_match_device(drv, dev);
+	if (ret == 0) {
+		/* no match */
+		return 0;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_dbg(dev, "Device match requests probe deferral\n");
+		driver_deferred_probe_add(dev);
+	} else if (ret < 0) {
+		dev_dbg(dev, "Bus failed to match device: %d", ret);
+		return ret;
+	} /* ret > 0 means positive match */
+
+	if (dev->parent && dev->bus->need_parent_lock)
+		device_lock(dev->parent);
+	device_lock(dev);
+	if (!dev->driver)
+		driver_probe_device(drv, dev);
+	device_unlock(dev);
+	if (dev->parent && dev->bus->need_parent_lock)
+		device_unlock(dev->parent);
+
+	return 0;
+}
+
+/**
+ * driver_attach - try to bind driver to devices.
+ * @drv: driver.
+ *
+ * Walk the list of devices that the bus has on it and try to
+ * match the driver with each one.  If driver_probe_device()
+ * returns 0 and the @dev->driver is set, we've found a
+ * compatible pair.
+ */
+int driver_attach(struct device_driver *drv)
+{
+	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
+}
+EXPORT_SYMBOL_GPL(driver_attach);
+
+/*
+ * __device_release_driver() must be called with @dev lock held.
+ * When called for a USB interface, @dev->parent lock must be held as well.
+ */
+static void __device_release_driver(struct device *dev, struct device *parent)
+{
+	struct device_driver *drv;
+
+	drv = dev->driver;
+	if (drv) {
+		if (driver_allows_async_probing(drv))
+			async_synchronize_full();
+
+		while (device_links_busy(dev)) {
+			device_unlock(dev);
+			if (parent)
+				device_unlock(parent);
+
+			device_links_unbind_consumers(dev);
+			if (parent)
+				device_lock(parent);
+
+			device_lock(dev);
+			/*
+			 * A concurrent invocation of the same function might
+			 * have released the driver successfully while this one
+			 * was waiting, so check for that.
+			 */
+			if (dev->driver != drv)
+				return;
+		}
+
+		pm_runtime_get_sync(dev);
+		pm_runtime_clean_up_links(dev);
+
+		driver_sysfs_remove(dev);
+
+		if (dev->bus)
+			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+						     BUS_NOTIFY_UNBIND_DRIVER,
+						     dev);
+
+		pm_runtime_put_sync(dev);
+
+		if (dev->bus && dev->bus->remove)
+			dev->bus->remove(dev);
+		else if (drv->remove)
+			drv->remove(dev);
+
+		device_links_driver_cleanup(dev);
+		dma_deconfigure(dev);
+
+		devres_release_all(dev);
+		dev->driver = NULL;
+		dev_set_drvdata(dev, NULL);
+		if (dev->pm_domain && dev->pm_domain->dismiss)
+			dev->pm_domain->dismiss(dev);
+		pm_runtime_reinit(dev);
+		dev_pm_set_driver_flags(dev, 0);
+
+		klist_remove(&dev->p->knode_driver);
+		device_pm_check_callbacks(dev);
+		if (dev->bus)
+			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+						     BUS_NOTIFY_UNBOUND_DRIVER,
+						     dev);
+
+		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
+	}
+}
+
+void device_release_driver_internal(struct device *dev,
+				    struct device_driver *drv,
+				    struct device *parent)
+{
+	if (parent && dev->bus->need_parent_lock)
+		device_lock(parent);
+
+	device_lock(dev);
+	if (!drv || drv == dev->driver)
+		__device_release_driver(dev, parent);
+
+	device_unlock(dev);
+	if (parent && dev->bus->need_parent_lock)
+		device_unlock(parent);
+}
+
+/**
+ * device_release_driver - manually detach device from driver.
+ * @dev: device.
+ *
+ * Manually detach device from driver.
+ * When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
+ */
+void device_release_driver(struct device *dev)
+{
+	/*
+	 * If anyone calls device_release_driver() recursively from
+	 * within their ->remove callback for the same device, they
+	 * will deadlock right here.
+	 */
+	device_release_driver_internal(dev, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(device_release_driver);
+
+/**
+ * driver_detach - detach driver from all devices it controls.
+ * @drv: driver.
+ */
+void driver_detach(struct device_driver *drv)
+{
+	struct device_private *dev_prv;
+	struct device *dev;
+
+	for (;;) {
+		spin_lock(&drv->p->klist_devices.k_lock);
+		if (list_empty(&drv->p->klist_devices.k_list)) {
+			spin_unlock(&drv->p->klist_devices.k_lock);
+			break;
+		}
+		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
+				     struct device_private,
+				     knode_driver.n_node);
+		dev = dev_prv->device;
+		get_device(dev);
+		spin_unlock(&drv->p->klist_devices.k_lock);
+		device_release_driver_internal(dev, drv, dev->parent);
+		put_device(dev);
+	}
+}
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
new file mode 100644
index 0000000..d427e80
--- /dev/null
+++ b/drivers/base/devcon.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device connections
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/device.h>
+
+static DEFINE_MUTEX(devcon_lock);
+static LIST_HEAD(devcon_list);
+
+/**
+ * device_connection_find_match - Find physical connection to a device
+ * @dev: Device with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @dev and another
+ * device. @match will be used to convert the connection description to data the
+ * caller is expecting to be returned.
+ */
+void *device_connection_find_match(struct device *dev, const char *con_id,
+			       void *data,
+			       void *(*match)(struct device_connection *con,
+					      int ep, void *data))
+{
+	const char *devname = dev_name(dev);
+	struct device_connection *con;
+	void *ret = NULL;
+	int ep;
+
+	if (!match)
+		return NULL;
+
+	mutex_lock(&devcon_lock);
+
+	list_for_each_entry(con, &devcon_list, list) {
+		ep = match_string(con->endpoint, 2, devname);
+		if (ep < 0)
+			continue;
+
+		if (con_id && strcmp(con->id, con_id))
+			continue;
+
+		ret = match(con, !ep, data);
+		if (ret)
+			break;
+	}
+
+	mutex_unlock(&devcon_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_connection_find_match);
+
+extern struct bus_type platform_bus_type;
+extern struct bus_type pci_bus_type;
+extern struct bus_type i2c_bus_type;
+extern struct bus_type spi_bus_type;
+
+static struct bus_type *generic_match_buses[] = {
+	&platform_bus_type,
+#ifdef CONFIG_PCI
+	&pci_bus_type,
+#endif
+#ifdef CONFIG_I2C
+	&i2c_bus_type,
+#endif
+#ifdef CONFIG_SPI_MASTER
+	&spi_bus_type,
+#endif
+	NULL,
+};
+
+/* This tries to find the device from the most common bus types by name. */
+static void *generic_match(struct device_connection *con, int ep, void *data)
+{
+	struct bus_type **bus;
+	struct device *dev;
+
+	for (bus = generic_match_buses; *bus; bus++) {
+		dev = bus_find_device_by_name(*bus, NULL, con->endpoint[ep]);
+		if (dev)
+			return dev;
+	}
+
+	/*
+	 * We only get called if a connection was found, tell the caller to
+	 * wait for the other device to show up.
+	 */
+	return ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * device_connection_find - Find two devices connected together
+ * @dev: Device with the connection
+ * @con_id: Identifier for the connection
+ *
+ * Find a connection with unique identifier @con_id between @dev and
+ * another device. On success returns handle to the device that is connected
+ * to @dev, with the reference count for the found device incremented. Returns
+ * NULL if no matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a
+ * connection was found but the other device has not been enumerated yet.
+ */
+struct device *device_connection_find(struct device *dev, const char *con_id)
+{
+	return device_connection_find_match(dev, con_id, NULL, generic_match);
+}
+EXPORT_SYMBOL_GPL(device_connection_find);
+
+/**
+ * device_connection_add - Register a connection description
+ * @con: The connection description to be registered
+ */
+void device_connection_add(struct device_connection *con)
+{
+	mutex_lock(&devcon_lock);
+	list_add_tail(&con->list, &devcon_list);
+	mutex_unlock(&devcon_lock);
+}
+EXPORT_SYMBOL_GPL(device_connection_add);
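+
+/*
+ * Illustrative sketch (all names hypothetical, not part of the original
+ * file): board code registers a connection description, and a consumer
+ * later resolves its peer; ERR_PTR(-EPROBE_DEFER) means the peer exists
+ * but has not been enumerated yet:
+ *
+ *	static struct device_connection foo_con = {
+ *		.endpoint = { "foo-phy", "foo-mux" },
+ *		.id = "usb-role-switch",
+ *	};
+ *
+ *	device_connection_add(&foo_con);
+ *	...
+ *	struct device *peer = device_connection_find(dev, "usb-role-switch");
+ */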
+
+/**
+ * device_connection_remove - Unregister connection description
+ * @con: The connection description to be unregistered
+ */
+void device_connection_remove(struct device_connection *con)
+{
+	mutex_lock(&devcon_lock);
+	list_del(&con->list);
+	mutex_unlock(&devcon_lock);
+}
+EXPORT_SYMBOL_GPL(device_connection_remove);
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
new file mode 100644
index 0000000..f1a3353
--- /dev/null
+++ b/drivers/base/devcoredump.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/workqueue.h>
+
+static struct class devcd_class;
+
+/* global disable flag, for security purposes */
+static bool devcd_disabled;
+
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT	(HZ * 60 * 5)
+
+struct devcd_entry {
+	struct device devcd_dev;
+	void *data;
+	size_t datalen;
+	struct module *owner;
+	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+			void *data, size_t datalen);
+	void (*free)(void *data);
+	struct delayed_work del_wk;
+	struct device *failing_dev;
+};
+
+static struct devcd_entry *dev_to_devcd(struct device *dev)
+{
+	return container_of(dev, struct devcd_entry, devcd_dev);
+}
+
+static void devcd_dev_release(struct device *dev)
+{
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	devcd->free(devcd->data);
+	module_put(devcd->owner);
+
+	/*
+	 * this seems racy, but I don't see a notifier or such on
+	 * a struct device to know when it goes away?
+	 */
+	if (devcd->failing_dev->kobj.sd)
+		sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
+				  "devcoredump");
+
+	put_device(devcd->failing_dev);
+	kfree(devcd);
+}
+
+static void devcd_del(struct work_struct *wk)
+{
+	struct devcd_entry *devcd;
+
+	devcd = container_of(wk, struct devcd_entry, del_wk.work);
+
+	device_del(&devcd->devcd_dev);
+	put_device(&devcd->devcd_dev);
+}
+
+static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+			       struct bin_attribute *bin_attr,
+			       char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
+}
+
+static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *bin_attr,
+				char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	mod_delayed_work(system_wq, &devcd->del_wk, 0);
+
+	return count;
+}
+
+static struct bin_attribute devcd_attr_data = {
+	.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
+	.size = 0,
+	.read = devcd_data_read,
+	.write = devcd_data_write,
+};
+
+static struct bin_attribute *devcd_dev_bin_attrs[] = {
+	&devcd_attr_data, NULL,
+};
+
+static const struct attribute_group devcd_dev_group = {
+	.bin_attrs = devcd_dev_bin_attrs,
+};
+
+static const struct attribute_group *devcd_dev_groups[] = {
+	&devcd_dev_group, NULL,
+};
+
+static int devcd_free(struct device *dev, void *data)
+{
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	flush_delayed_work(&devcd->del_wk);
+	return 0;
+}
+
+static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
+			     char *buf)
+{
+	return sprintf(buf, "%d\n", devcd_disabled);
+}
+
+static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
+			      const char *buf, size_t count)
+{
+	long tmp = simple_strtol(buf, NULL, 10);
+
+	/*
+	 * This essentially makes the attribute write-once, since you can't
+	 * go back to not having it disabled. This is intentional, it serves
+	 * as a system lockdown feature.
+	 */
+	if (tmp != 1)
+		return -EINVAL;
+
+	devcd_disabled = true;
+
+	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+
+	return count;
+}
+static CLASS_ATTR_RW(disabled);
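+
+/*
+ * Usage sketch (not part of the original file): device coredumps can be
+ * disabled for the remainder of this boot from userspace; the setting is
+ * deliberately one-way:
+ *
+ *	# echo 1 > /sys/class/devcoredump/disabled
+ */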
+
+static struct attribute *devcd_class_attrs[] = {
+	&class_attr_disabled.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(devcd_class);
+
+static struct class devcd_class = {
+	.name		= "devcoredump",
+	.owner		= THIS_MODULE,
+	.dev_release	= devcd_dev_release,
+	.dev_groups	= devcd_dev_groups,
+	.class_groups	= devcd_class_groups,
+};
+
+static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
+			   void *data, size_t datalen)
+{
+	if (offset > datalen)
+		return -EINVAL;
+
+	if (offset + count > datalen)
+		count = datalen - offset;
+
+	if (count)
+		memcpy(buffer, ((u8 *)data) + offset, count);
+
+	return count;
+}
+
+static void devcd_freev(void *data)
+{
+	vfree(data);
+}
+
+/**
+ * dev_coredumpv - create device coredump with vmalloc data
+ * @dev: the struct device for the crashed device
+ * @data: vmalloc data containing the device coredump
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * This function takes ownership of the vmalloc'ed data and will free
+ * it when it is no longer used. See dev_coredumpm() for more information.
+ */
+void dev_coredumpv(struct device *dev, void *data, size_t datalen,
+		   gfp_t gfp)
+{
+	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpv);
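+
+/*
+ * Usage sketch (not part of the original file): a driver that captured
+ * crash state into a vmalloc'ed buffer hands ownership to devcoredump:
+ *
+ *	buf = vmalloc(len);
+ *	if (!buf)
+ *		return;
+ *	...fill buf with device state...
+ *	dev_coredumpv(dev, buf, len, GFP_KERNEL);
+ */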
+
+static int devcd_match_failing(struct device *dev, const void *failing)
+{
+	struct devcd_entry *devcd = dev_to_devcd(dev);
+
+	return devcd->failing_dev == failing;
+}
+
+/**
+ * devcd_free_sgtable - free all the memory of the given scatterlist table
+ * (i.e. both pages and scatterlist instances)
+ * @data: pointer to the sg_table to free
+ *
+ * NOTE: if two tables are allocated with devcd_alloc_sgtable and then
+ * chained using the sg_chain function, this function should be called
+ * only once on the chained table.
+ */
+static void devcd_free_sgtable(void *data)
+{
+	_devcd_free_sgtable(data);
+}
+
+/**
+ * devcd_read_from_sgtable - copy data from sg_table to a given buffer
+ * and return the number of bytes read
+ * @buffer: the buffer to copy the data to
+ * @offset: start copying @offset bytes from the head of the data
+ *	in the given scatterlist
+ * @buf_len: the length of the buffer
+ * @data: the scatterlist table to copy from
+ * @data_len: the length of the data in the sg_table
+ */
+static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
+				       size_t buf_len, void *data,
+				       size_t data_len)
+{
+	struct scatterlist *table = data;
+
+	if (offset > data_len)
+		return -EINVAL;
+
+	if (offset + buf_len > data_len)
+		buf_len = data_len - offset;
+	return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
+				  offset);
+}
+
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+void dev_coredumpm(struct device *dev, struct module *owner,
+		   void *data, size_t datalen, gfp_t gfp,
+		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+				   void *data, size_t datalen),
+		   void (*free)(void *data))
+{
+	static atomic_t devcd_count = ATOMIC_INIT(0);
+	struct devcd_entry *devcd;
+	struct device *existing;
+
+	if (devcd_disabled)
+		goto free;
+
+	existing = class_find_device(&devcd_class, NULL, dev,
+				     devcd_match_failing);
+	if (existing) {
+		put_device(existing);
+		goto free;
+	}
+
+	if (!try_module_get(owner))
+		goto free;
+
+	devcd = kzalloc(sizeof(*devcd), gfp);
+	if (!devcd)
+		goto put_module;
+
+	devcd->owner = owner;
+	devcd->data = data;
+	devcd->datalen = datalen;
+	devcd->read = read;
+	devcd->free = free;
+	devcd->failing_dev = get_device(dev);
+
+	device_initialize(&devcd->devcd_dev);
+
+	dev_set_name(&devcd->devcd_dev, "devcd%d",
+		     atomic_inc_return(&devcd_count));
+	devcd->devcd_dev.class = &devcd_class;
+
+	if (device_add(&devcd->devcd_dev))
+		goto put_device;
+
+	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
+			      "failing_device"))
+		/* nothing - symlink will be missing */;
+
+	if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+			      "devcoredump"))
+		/* nothing - symlink will be missing */;
+
+	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+
+	return;
+ put_device:
+	put_device(&devcd->devcd_dev);
+ put_module:
+	module_put(owner);
+ free:
+	free(data);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpm);
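+
+/*
+ * Illustrative sketch (my_read/my_free are hypothetical, not part of the
+ * original file): a module with its own dump representation supplies
+ * matching read/free callbacks:
+ *
+ *	dev_coredumpm(dev, THIS_MODULE, state, len, GFP_KERNEL,
+ *		      my_read, my_free);
+ */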
+
+/**
+ * dev_coredumpsg - create device coredump that uses scatterlist as data
+ * parameter
+ * @dev: the struct device for the crashed device
+ * @table: the dump data
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed
+ * it will free the data.
+ */
+void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+		    size_t datalen, gfp_t gfp)
+{
+	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
+		      devcd_free_sgtable);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpsg);
+
+static int __init devcoredump_init(void)
+{
+	return class_register(&devcd_class);
+}
+__initcall(devcoredump_init);
+
+static void __exit devcoredump_exit(void)
+{
+	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+	class_unregister(&devcd_class);
+}
+__exitcall(devcoredump_exit);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
new file mode 100644
index 0000000..f98a097
--- /dev/null
+++ b/drivers/base/devres.c
@@ -0,0 +1,1057 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/devres.c - device resource management
+ *
+ * Copyright (c) 2006  SUSE Linux Products GmbH
+ * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+
+#include "base.h"
+
+struct devres_node {
+	struct list_head		entry;
+	dr_release_t			release;
+#ifdef CONFIG_DEBUG_DEVRES
+	const char			*name;
+	size_t				size;
+#endif
+};
+
+struct devres {
+	struct devres_node		node;
+	/* -- 3 pointers */
+	unsigned long long		data[];	/* guarantee ull alignment */
+};
+
+struct devres_group {
+	struct devres_node		node[2];
+	void				*id;
+	int				color;
+	/* -- 8 pointers */
+};
+
+#ifdef CONFIG_DEBUG_DEVRES
+static int log_devres = 0;
+module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
+
+static void set_node_dbginfo(struct devres_node *node, const char *name,
+			     size_t size)
+{
+	node->name = name;
+	node->size = size;
+}
+
+static void devres_log(struct device *dev, struct devres_node *node,
+		       const char *op)
+{
+	if (unlikely(log_devres))
+		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
+			op, node, node->name, (unsigned long)node->size);
+}
+#else /* CONFIG_DEBUG_DEVRES */
+#define set_node_dbginfo(node, n, s)	do {} while (0)
+#define devres_log(dev, node, op)	do {} while (0)
+#endif /* CONFIG_DEBUG_DEVRES */
+
+/*
+ * Release functions for devres group.  These callbacks are used only
+ * for identification.
+ */
+static void group_open_release(struct device *dev, void *res)
+{
+	/* noop */
+}
+
+static void group_close_release(struct device *dev, void *res)
+{
+	/* noop */
+}
+
+static struct devres_group * node_to_group(struct devres_node *node)
+{
+	if (node->release == &group_open_release)
+		return container_of(node, struct devres_group, node[0]);
+	if (node->release == &group_close_release)
+		return container_of(node, struct devres_group, node[1]);
+	return NULL;
+}
+
+static __always_inline struct devres * alloc_dr(dr_release_t release,
+						size_t size, gfp_t gfp, int nid)
+{
+	size_t tot_size;
+	struct devres *dr;
+
+	/* We must catch any near-SIZE_MAX cases that could overflow. */
+	if (unlikely(check_add_overflow(sizeof(struct devres), size,
+					&tot_size)))
+		return NULL;
+
+	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+	if (unlikely(!dr))
+		return NULL;
+
+	memset(dr, 0, offsetof(struct devres, data));
+
+	INIT_LIST_HEAD(&dr->node.entry);
+	dr->node.release = release;
+	return dr;
+}
+
+static void add_dr(struct device *dev, struct devres_node *node)
+{
+	devres_log(dev, node, "ADD");
+	BUG_ON(!list_empty(&node->entry));
+	list_add_tail(&node->entry, &dev->devres_head);
+}
+
+#ifdef CONFIG_DEBUG_DEVRES
+void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
+		      const char *name)
+{
+	struct devres *dr;
+
+	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
+	if (unlikely(!dr))
+		return NULL;
+	set_node_dbginfo(&dr->node, name, size);
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
+#else
+/**
+ * devres_alloc - Allocate device resource data
+ * @release: Release function devres will be associated with
+ * @size: Allocation size
+ * @gfp: Allocation flags
+ * @nid: NUMA node
+ *
+ * Allocate devres of @size bytes.  The allocated area is zeroed, then
+ * associated with @release.  The returned pointer can be passed to
+ * other devres_*() functions.
+ *
+ * RETURNS:
+ * Pointer to allocated devres on success, NULL on failure.
+ */
+void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
+{
+	struct devres *dr;
+
+	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
+	if (unlikely(!dr))
+		return NULL;
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(devres_alloc_node);
+#endif
+
+/**
+ * devres_for_each_res - Resource iterator
+ * @dev: Device to iterate resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ * @fn: Function to be called for each matched resource.
+ * @data: Data for @fn, the 3rd parameter of @fn
+ *
+ * Call @fn for each devres of @dev which is associated with @release
+ * and for which @match returns 1.
+ *
+ * RETURNS:
+ * 	void
+ */
+void devres_for_each_res(struct device *dev, dr_release_t release,
+			dr_match_t match, void *match_data,
+			void (*fn)(struct device *, void *, void *),
+			void *data)
+{
+	struct devres_node *node;
+	struct devres_node *tmp;
+	unsigned long flags;
+
+	if (!fn)
+		return;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	list_for_each_entry_safe_reverse(node, tmp,
+			&dev->devres_head, entry) {
+		struct devres *dr = container_of(node, struct devres, node);
+
+		if (node->release != release)
+			continue;
+		if (match && !match(dev, dr->data, match_data))
+			continue;
+		fn(dev, dr->data, data);
+	}
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_for_each_res);
+
+/**
+ * devres_free - Free device resource data
+ * @res: Pointer to devres data to free
+ *
+ * Free devres created with devres_alloc().
+ */
+void devres_free(void *res)
+{
+	if (res) {
+		struct devres *dr = container_of(res, struct devres, data);
+
+		BUG_ON(!list_empty(&dr->node.entry));
+		kfree(dr);
+	}
+}
+EXPORT_SYMBOL_GPL(devres_free);
+
+/**
+ * devres_add - Register device resource
+ * @dev: Device to add resource to
+ * @res: Resource to register
+ *
+ * Register devres @res to @dev.  @res should have been allocated
+ * using devres_alloc().  On driver detach, the associated release
+ * function will be invoked and devres will be freed automatically.
+ */
+void devres_add(struct device *dev, void *res)
+{
+	struct devres *dr = container_of(res, struct devres, data);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	add_dr(dev, &dr->node);
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_add);
+
+static struct devres *find_dr(struct device *dev, dr_release_t release,
+			      dr_match_t match, void *match_data)
+{
+	struct devres_node *node;
+
+	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
+		struct devres *dr = container_of(node, struct devres, node);
+
+		if (node->release != release)
+			continue;
+		if (match && !match(dev, dr->data, match_data))
+			continue;
+		return dr;
+	}
+
+	return NULL;
+}
+
+/**
+ * devres_find - Find device resource
+ * @dev: Device to lookup resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev which is associated with @release
+ * and for which @match returns 1.  If @match is NULL, it's considered
+ * to match all.
+ *
+ * RETURNS:
+ * Pointer to found devres, NULL if not found.
+ */
+void * devres_find(struct device *dev, dr_release_t release,
+		   dr_match_t match, void *match_data)
+{
+	struct devres *dr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	dr = find_dr(dev, release, match, match_data);
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+	if (dr)
+		return dr->data;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(devres_find);
+
+/**
+ * devres_get - Find devres, if non-existent, add one atomically
+ * @dev: Device to lookup or add devres for
+ * @new_res: Pointer to new initialized devres to add if not found
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev which has the same release function
+ * as @new_res and for which @match return 1.  If found, @new_res is
+ * freed; otherwise, @new_res is added atomically.
+ *
+ * RETURNS:
+ * Pointer to found or added devres.
+ */
+void * devres_get(struct device *dev, void *new_res,
+		  dr_match_t match, void *match_data)
+{
+	struct devres *new_dr = container_of(new_res, struct devres, data);
+	struct devres *dr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	dr = find_dr(dev, new_dr->node.release, match, match_data);
+	if (!dr) {
+		add_dr(dev, &new_dr->node);
+		dr = new_dr;
+		new_res = NULL;
+	}
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+	devres_free(new_res);
+
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(devres_get);
+
+/**
+ * devres_remove - Find a device resource and remove it
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1.  If @match is NULL, it's considered to
+ * match all.  If found, the resource is removed atomically and
+ * returned.
+ *
+ * RETURNS:
+ * Pointer to removed devres on success, NULL if not found.
+ */
+void * devres_remove(struct device *dev, dr_release_t release,
+		     dr_match_t match, void *match_data)
+{
+	struct devres *dr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	dr = find_dr(dev, release, match, match_data);
+	if (dr) {
+		list_del_init(&dr->node.entry);
+		devres_log(dev, &dr->node, "REM");
+	}
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+	if (dr)
+		return dr->data;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(devres_remove);
+
+/**
+ * devres_destroy - Find a device resource and destroy it
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1.  If @match is NULL, it's considered to
+ * match all.  If found, the resource is removed atomically and freed.
+ *
+ * Note that the release function for the resource will not be called,
+ * only the devres-allocated data will be freed.  The caller becomes
+ * responsible for freeing any other data.
+ *
+ * RETURNS:
+ * 0 if devres is found and freed, -ENOENT if not found.
+ */
+int devres_destroy(struct device *dev, dr_release_t release,
+		   dr_match_t match, void *match_data)
+{
+	void *res;
+
+	res = devres_remove(dev, release, match, match_data);
+	if (unlikely(!res))
+		return -ENOENT;
+
+	devres_free(res);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devres_destroy);
+
+
+/**
+ * devres_release - Find a device resource and destroy it, calling release
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1.  If @match is NULL, it's considered to
+ * match all.  If found, the resource is removed atomically, the
+ * release function called and the resource freed.
+ *
+ * RETURNS:
+ * 0 if devres is found and freed, -ENOENT if not found.
+ */
+int devres_release(struct device *dev, dr_release_t release,
+		   dr_match_t match, void *match_data)
+{
+	void *res;
+
+	res = devres_remove(dev, release, match, match_data);
+	if (unlikely(!res))
+		return -ENOENT;
+
+	(*release)(dev, res);
+	devres_free(res);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devres_release);
+
+static int remove_nodes(struct device *dev,
+			struct list_head *first, struct list_head *end,
+			struct list_head *todo)
+{
+	int cnt = 0, nr_groups = 0;
+	struct list_head *cur;
+
+	/* First pass - move normal devres entries to @todo and clear
+	 * devres_group colors.
+	 */
+	cur = first;
+	while (cur != end) {
+		struct devres_node *node;
+		struct devres_group *grp;
+
+		node = list_entry(cur, struct devres_node, entry);
+		cur = cur->next;
+
+		grp = node_to_group(node);
+		if (grp) {
+			/* clear color of group markers in the first pass */
+			grp->color = 0;
+			nr_groups++;
+		} else {
+			/* regular devres entry */
+			if (&node->entry == first)
+				first = first->next;
+			list_move_tail(&node->entry, todo);
+			cnt++;
+		}
+	}
+
+	if (!nr_groups)
+		return cnt;
+
+	/* Second pass - Scan groups and color them.  A group gets
+	 * color value of two iff the group is wholly contained in
+	 * [cur, end).  That is, for a closed group, both opening and
+	 * closing markers should be in the range, while just the
+	 * opening marker is enough for an open group.
+	 */
+	cur = first;
+	while (cur != end) {
+		struct devres_node *node;
+		struct devres_group *grp;
+
+		node = list_entry(cur, struct devres_node, entry);
+		cur = cur->next;
+
+		grp = node_to_group(node);
+		BUG_ON(!grp || list_empty(&grp->node[0].entry));
+
+		grp->color++;
+		if (list_empty(&grp->node[1].entry))
+			grp->color++;
+
+		BUG_ON(grp->color <= 0 || grp->color > 2);
+		if (grp->color == 2) {
+			/* No need to update cur or end.  The removed
+			 * nodes are always before both.
+			 */
+			list_move_tail(&grp->node[0].entry, todo);
+			list_del_init(&grp->node[1].entry);
+		}
+	}
+
+	return cnt;
+}
+
+static int release_nodes(struct device *dev, struct list_head *first,
+			 struct list_head *end, unsigned long flags)
+	__releases(&dev->devres_lock)
+{
+	LIST_HEAD(todo);
+	int cnt;
+	struct devres *dr, *tmp;
+
+	cnt = remove_nodes(dev, first, end, &todo);
+
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+	/* Release.  Note that both devres and devres_group are
+	 * handled as devres in the following loop.  This is safe.
+	 */
+	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
+		devres_log(dev, &dr->node, "REL");
+		dr->node.release(dev, dr->data);
+		kfree(dr);
+	}
+
+	return cnt;
+}
+
+/**
+ * devres_release_all - Release all managed resources
+ * @dev: Device to release resources for
+ *
+ * Release all resources associated with @dev.  This function is
+ * called on driver detach.
+ */
+int devres_release_all(struct device *dev)
+{
+	unsigned long flags;
+
+	/* Looks like an uninitialized device structure */
+	if (WARN_ON(dev->devres_head.next == NULL))
+		return -ENODEV;
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
+			     flags);
+}
+
+/**
+ * devres_open_group - Open a new devres group
+ * @dev: Device to open devres group for
+ * @id: Separator ID
+ * @gfp: Allocation flags
+ *
+ * Open a new devres group for @dev with @id.  For @id, using a
+ * pointer to an object which won't be used for another group is
+ * recommended.  If @id is NULL, an address-wise unique ID is created.
+ *
+ * RETURNS:
+ * ID of the new group, NULL on failure.
+ */
+void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+
+	grp = kmalloc(sizeof(*grp), gfp);
+	if (unlikely(!grp))
+		return NULL;
+
+	grp->node[0].release = &group_open_release;
+	grp->node[1].release = &group_close_release;
+	INIT_LIST_HEAD(&grp->node[0].entry);
+	INIT_LIST_HEAD(&grp->node[1].entry);
+	set_node_dbginfo(&grp->node[0], "grp<", 0);
+	set_node_dbginfo(&grp->node[1], "grp>", 0);
+	grp->id = grp;
+	if (id)
+		grp->id = id;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	add_dr(dev, &grp->node[0]);
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+	return grp->id;
+}
+EXPORT_SYMBOL_GPL(devres_open_group);
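+
+/*
+ * Illustrative sketch (not part of the original file): grouping several
+ * managed allocations so they can be unwound together without detaching
+ * the driver:
+ *
+ *	id = devres_open_group(dev, NULL, GFP_KERNEL);
+ *	if (!id)
+ *		return -ENOMEM;
+ *	...devm_kmalloc() and friends...
+ *	devres_close_group(dev, id);
+ *	...
+ *	devres_release_group(dev, id);
+ *
+ * The final devres_release_group() releases everything added between the
+ * open and close markers.
+ */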
+
+/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
+static struct devres_group * find_group(struct device *dev, void *id)
+{
+	struct devres_node *node;
+
+	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
+		struct devres_group *grp;
+
+		if (node->release != &group_open_release)
+			continue;
+
+		grp = container_of(node, struct devres_group, node[0]);
+
+		if (id) {
+			if (grp->id == id)
+				return grp;
+		} else if (list_empty(&grp->node[1].entry))
+			return grp;
+	}
+
+	return NULL;
+}
+
+/**
+ * devres_close_group - Close a devres group
+ * @dev: Device to close devres group for
+ * @id: ID of target group, can be NULL
+ *
+ * Close the group identified by @id.  If @id is NULL, the latest open
+ * group is selected.
+ */
+void devres_close_group(struct device *dev, void *id)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+
+	grp = find_group(dev, id);
+	if (grp)
+		add_dr(dev, &grp->node[1]);
+	else
+		WARN_ON(1);
+
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_close_group);
+
+/**
+ * devres_remove_group - Remove a devres group
+ * @dev: Device to remove group for
+ * @id: ID of target group, can be NULL
+ *
+ * Remove the group identified by @id.  If @id is NULL, the latest
+ * open group is selected.  Note that removing a group doesn't affect
+ * any other resources.
+ */
+void devres_remove_group(struct device *dev, void *id)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+
+	grp = find_group(dev, id);
+	if (grp) {
+		list_del_init(&grp->node[0].entry);
+		list_del_init(&grp->node[1].entry);
+		devres_log(dev, &grp->node[0], "REM");
+	} else
+		WARN_ON(1);
+
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+	kfree(grp);
+}
+EXPORT_SYMBOL_GPL(devres_remove_group);
+
+/**
+ * devres_release_group - Release resources in a devres group
+ * @dev: Device to release group for
+ * @id: ID of target group, can be NULL
+ *
+ * Release all resources in the group identified by @id.  If @id is
+ * NULL, the latest open group is selected.  The selected group and
+ * groups properly nested inside the selected group are removed.
+ *
+ * RETURNS:
+ * The number of released non-group resources.
+ */
+int devres_release_group(struct device *dev, void *id)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+	int cnt = 0;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+
+	grp = find_group(dev, id);
+	if (grp) {
+		struct list_head *first = &grp->node[0].entry;
+		struct list_head *end = &dev->devres_head;
+
+		if (!list_empty(&grp->node[1].entry))
+			end = grp->node[1].entry.next;
+
+		cnt = release_nodes(dev, first, end, flags);
+	} else {
+		WARN_ON(1);
+		spin_unlock_irqrestore(&dev->devres_lock, flags);
+	}
+
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(devres_release_group);
+
+/*
+ * Custom devres actions allow inserting a simple function call
+ * into the teardown sequence.
+ */
+
+struct action_devres {
+	void *data;
+	void (*action)(void *);
+};
+
+static int devm_action_match(struct device *dev, void *res, void *p)
+{
+	struct action_devres *devres = res;
+	struct action_devres *target = p;
+
+	return devres->action == target->action &&
+	       devres->data == target->data;
+}
+
+static void devm_action_release(struct device *dev, void *res)
+{
+	struct action_devres *devres = res;
+
+	devres->action(devres->data);
+}
+
+/**
+ * devm_add_action() - add a custom action to list of managed resources
+ * @dev: Device that owns the action
+ * @action: Function that should be called
+ * @data: Pointer to data passed to @action implementation
+ *
+ * This adds a custom action to the list of managed resources so that
+ * it gets executed as part of standard resource unwinding.
+ */
+int devm_add_action(struct device *dev, void (*action)(void *), void *data)
+{
+	struct action_devres *devres;
+
+	devres = devres_alloc(devm_action_release,
+			      sizeof(struct action_devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	devres->data = data;
+	devres->action = action;
+
+	devres_add(dev, devres);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_add_action);
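+
+/*
+ * Illustrative sketch (foo_* names hypothetical, not part of the original
+ * file): registering a teardown callback that runs automatically when the
+ * driver detaches:
+ *
+ *	static void foo_stop(void *data)
+ *	{
+ *		foo_hw_stop(data);
+ *	}
+ *
+ *	ret = devm_add_action(dev, foo_stop, foo);
+ *	if (ret)
+ *		return ret;
+ */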
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+	struct action_devres devres = {
+		.data = data,
+		.action = action,
+	};
+
+	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
+			       &devres));
+}
+EXPORT_SYMBOL_GPL(devm_remove_action);
+
+/*
+ * Managed kmalloc/kfree
+ */
+static void devm_kmalloc_release(struct device *dev, void *res)
+{
+	/* noop */
+}
+
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
+{
+	return res == data;
+}
+
+/**
+ * devm_kmalloc - Resource-managed kmalloc
+ * @dev: Device to allocate memory for
+ * @size: Allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed kmalloc.  Memory allocated with this function is
+ * automatically freed on driver detach.  Like all other devres
+ * resources, guaranteed alignment is unsigned long long.
+ *
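+ * Typical probe-time usage (struct foo is a hypothetical driver struct):
+ *
+ *	struct foo *priv;
+ *
+ *	priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ *	if (!priv)
+ *		return -ENOMEM;
+ *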
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+	struct devres *dr;
+
+	/* use raw alloc_dr for kmalloc caller tracing */
+	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
+	if (unlikely(!dr))
+		return NULL;
+
+	/*
+	 * This is named devm_kzalloc_release for historical reasons;
+	 * the initial implementation did not support kmalloc, only kzalloc.
+	 */
+	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
+	devres_add(dev, dr->data);
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_kmalloc);
+
+/**
+ * devm_kstrdup - Allocate resource managed space and
+ *                copy an existing string into that.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+	size_t size;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	size = strlen(s) + 1;
+	buf = devm_kmalloc(dev, size, gfp);
+	if (buf)
+		memcpy(buf, s, size);
+	return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
+ * devm_kvasprintf - Allocate resource managed space and format a string
+ *		     into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * @fmt: The printf()-style format string
+ * @ap: Arguments for the format string
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+		      va_list ap)
+{
+	unsigned int len;
+	char *p;
+	va_list aq;
+
+	va_copy(aq, ap);
+	len = vsnprintf(NULL, 0, fmt, aq);
+	va_end(aq);
+
+	p = devm_kmalloc(dev, len+1, gfp);
+	if (!p)
+		return NULL;
+
+	vsnprintf(p, len+1, fmt, ap);
+
+	return p;
+}
+EXPORT_SYMBOL(devm_kvasprintf);
+
+/**
+ * devm_kasprintf - Allocate resource managed space and format a string
+ *		    into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * @fmt: The printf()-style format string
+ * @...: Arguments for the format string
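+ *
+ * Illustrative use (the format string and @id are hypothetical):
+ *
+ *	name = devm_kasprintf(dev, GFP_KERNEL, "%s-mux%d", dev_name(dev), id);
+ *	if (!name)
+ *		return -ENOMEM;
+ *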
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
+{
+	va_list ap;
+	char *p;
+
+	va_start(ap, fmt);
+	p = devm_kvasprintf(dev, gfp, fmt, ap);
+	va_end(ap);
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf);
+
+/**
+ * devm_kfree - Resource-managed kfree
+ * @dev: Device this memory belongs to
+ * @p: Memory to free
+ *
+ * Free memory allocated with devm_kmalloc().
+ */
+void devm_kfree(struct device *dev, void *p)
+{
+	int rc;
+
+	rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_kfree);
+
+/**
+ * devm_kmemdup - Resource-managed kmemdup
+ * @dev: Device this memory belongs to
+ * @src: Memory region to duplicate
+ * @len: Memory region length
+ * @gfp: GFP mask to use
+ *
+ * Duplicate a region of memory using resource-managed kmalloc.
+ */
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+	void *p;
+
+	p = devm_kmalloc(dev, len, gfp);
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup);
+
+struct pages_devres {
+	unsigned long addr;
+	unsigned int order;
+};
+
+static int devm_pages_match(struct device *dev, void *res, void *p)
+{
+	struct pages_devres *devres = res;
+	struct pages_devres *target = p;
+
+	return devres->addr == target->addr;
+}
+
+static void devm_pages_release(struct device *dev, void *res)
+{
+	struct pages_devres *devres = res;
+
+	free_pages(devres->addr, devres->order);
+}
+
+/**
+ * devm_get_free_pages - Resource-managed __get_free_pages
+ * @dev: Device to allocate memory for
+ * @gfp_mask: Allocation gfp flags
+ * @order: Allocation size is (1 << order) pages
+ *
+ * Managed get_free_pages.  Memory allocated with this function is
+ * automatically freed on driver detach.
+ *
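+ * Illustrative use, allocating a four-page scratch buffer (hypothetical):
+ *
+ *	buf = devm_get_free_pages(dev, GFP_KERNEL, 2);	/* 1 << 2 pages */
+ *	if (!buf)
+ *		return -ENOMEM;
+ *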
+ * RETURNS:
+ * Address of allocated memory on success, 0 on failure.
+ */
+unsigned long devm_get_free_pages(struct device *dev,
+				  gfp_t gfp_mask, unsigned int order)
+{
+	struct pages_devres *devres;
+	unsigned long addr;
+
+	addr = __get_free_pages(gfp_mask, order);
+
+	if (unlikely(!addr))
+		return 0;
+
+	devres = devres_alloc(devm_pages_release,
+			      sizeof(struct pages_devres), GFP_KERNEL);
+	if (unlikely(!devres)) {
+		free_pages(addr, order);
+		return 0;
+	}
+
+	devres->addr = addr;
+	devres->order = order;
+
+	devres_add(dev, devres);
+	return addr;
+}
+EXPORT_SYMBOL_GPL(devm_get_free_pages);
+
+/**
+ * devm_free_pages - Resource-managed free_pages
+ * @dev: Device this memory belongs to
+ * @addr: Memory to free
+ *
+ * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
+ * there is no need to supply the @order.
+ */
+void devm_free_pages(struct device *dev, unsigned long addr)
+{
+	struct pages_devres devres = { .addr = addr };
+
+	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
+			       &devres));
+}
+EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+	void __percpu *p;
+
+	p = *(void __percpu **)pdata;
+	free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+	struct devres *devr = container_of(data, struct devres, data);
+
+	return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
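+ * Normally reached via the devm_alloc_percpu() wrapper macro; a sketch
+ * (struct foo_stats is hypothetical):
+ *
+ *	struct foo_stats __percpu *stats;
+ *
+ *	stats = devm_alloc_percpu(dev, struct foo_stats);
+ *	if (!stats)
+ *		return -ENOMEM;
+ *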
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+		size_t align)
+{
+	void *p;
+	void __percpu *pcpu;
+
+	pcpu = __alloc_percpu(size, align);
+	if (!pcpu)
+		return NULL;
+
+	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+	if (!p) {
+		free_percpu(pcpu);
+		return NULL;
+	}
+
+	*(void __percpu **)p = pcpu;
+
+	devres_add(dev, p);
+
+	return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+			       (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
new file mode 100644
index 0000000..f776807
--- /dev/null
+++ b/drivers/base/devtmpfs.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * devtmpfs - kernel-maintained tmpfs-based /dev
+ *
+ * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
+ *
+ * During bootup, before any driver core device is registered,
+ * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
+ * device which requests a device node will add a node in this
+ * filesystem.
+ * By default, all device nodes are named after the device's name,
+ * owned by root and have a default mode of 0600. Subsystems can
+ * overwrite the default setting if needed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include "base.h"
+
+static struct task_struct *thread;
+
+#if defined CONFIG_DEVTMPFS_MOUNT
+static int mount_dev = 1;
+#else
+static int mount_dev;
+#endif
+
+static DEFINE_SPINLOCK(req_lock);
+
+static struct req {
+	struct req *next;
+	struct completion done;
+	int err;
+	const char *name;
+	umode_t mode;	/* 0 => delete */
+	kuid_t uid;
+	kgid_t gid;
+	struct device *dev;
+} *requests;
+
+static int __init mount_param(char *str)
+{
+	mount_dev = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("devtmpfs.mount=", mount_param);
+
+static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
+		      const char *dev_name, void *data)
+{
+#ifdef CONFIG_TMPFS
+	return mount_single(fs_type, flags, data, shmem_fill_super);
+#else
+	return mount_single(fs_type, flags, data, ramfs_fill_super);
+#endif
+}
+
+static struct file_system_type dev_fs_type = {
+	.name = "devtmpfs",
+	.mount = dev_mount,
+	.kill_sb = kill_litter_super,
+};
+
+#ifdef CONFIG_BLOCK
+static inline int is_blockdev(struct device *dev)
+{
+	return dev->class == &block_class;
+}
+#else
+static inline int is_blockdev(struct device *dev) { return 0; }
+#endif
+
+int devtmpfs_create_node(struct device *dev)
+{
+	const char *tmp = NULL;
+	struct req req;
+
+	if (!thread)
+		return 0;
+
+	req.mode = 0;
+	req.uid = GLOBAL_ROOT_UID;
+	req.gid = GLOBAL_ROOT_GID;
+	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
+	if (!req.name)
+		return -ENOMEM;
+
+	if (req.mode == 0)
+		req.mode = 0600;
+	if (is_blockdev(dev))
+		req.mode |= S_IFBLK;
+	else
+		req.mode |= S_IFCHR;
+
+	req.dev = dev;
+
+	init_completion(&req.done);
+
+	spin_lock(&req_lock);
+	req.next = requests;
+	requests = &req;
+	spin_unlock(&req_lock);
+
+	wake_up_process(thread);
+	wait_for_completion(&req.done);
+
+	kfree(tmp);
+
+	return req.err;
+}
+
+int devtmpfs_delete_node(struct device *dev)
+{
+	const char *tmp = NULL;
+	struct req req;
+
+	if (!thread)
+		return 0;
+
+	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
+	if (!req.name)
+		return -ENOMEM;
+
+	req.mode = 0;
+	req.dev = dev;
+
+	init_completion(&req.done);
+
+	spin_lock(&req_lock);
+	req.next = requests;
+	requests = &req;
+	spin_unlock(&req_lock);
+
+	wake_up_process(thread);
+	wait_for_completion(&req.done);
+
+	kfree(tmp);
+	return req.err;
+}
+
+static int dev_mkdir(const char *name, umode_t mode)
+{
+	struct dentry *dentry;
+	struct path path;
+	int err;
+
+	dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
+	if (!err)
+		/* mark as kernel-created inode */
+		d_inode(dentry)->i_private = &thread;
+	done_path_create(&path, dentry);
+	return err;
+}
+
+static int create_path(const char *nodepath)
+{
+	char *path;
+	char *s;
+	int err = 0;
+
+	/* parent directories do not exist, create them */
+	path = kstrdup(nodepath, GFP_KERNEL);
+	if (!path)
+		return -ENOMEM;
+
+	s = path;
+	for (;;) {
+		s = strchr(s, '/');
+		if (!s)
+			break;
+		s[0] = '\0';
+		err = dev_mkdir(path, 0755);
+		if (err && err != -EEXIST)
+			break;
+		s[0] = '/';
+		s++;
+	}
+	kfree(path);
+	return err;
+}
+
+static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
+			 kgid_t gid, struct device *dev)
+{
+	struct dentry *dentry;
+	struct path path;
+	int err;
+
+	dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+	if (dentry == ERR_PTR(-ENOENT)) {
+		create_path(nodename);
+		dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+	}
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
+	if (!err) {
+		struct iattr newattrs;
+
+		newattrs.ia_mode = mode;
+		newattrs.ia_uid = uid;
+		newattrs.ia_gid = gid;
+		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
+		inode_lock(d_inode(dentry));
+		notify_change(dentry, &newattrs, NULL);
+		inode_unlock(d_inode(dentry));
+
+		/* mark as kernel-created inode */
+		d_inode(dentry)->i_private = &thread;
+	}
+	done_path_create(&path, dentry);
+	return err;
+}
+
+static int dev_rmdir(const char *name)
+{
+	struct path parent;
+	struct dentry *dentry;
+	int err;
+
+	dentry = kern_path_locked(name, &parent);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+	if (d_really_is_positive(dentry)) {
+		if (d_inode(dentry)->i_private == &thread)
+			err = vfs_rmdir(d_inode(parent.dentry), dentry);
+		else
+			err = -EPERM;
+	} else {
+		err = -ENOENT;
+	}
+	dput(dentry);
+	inode_unlock(d_inode(parent.dentry));
+	path_put(&parent);
+	return err;
+}
+
+static int delete_path(const char *nodepath)
+{
+	char *path;
+	int err = 0;
+
+	path = kstrdup(nodepath, GFP_KERNEL);
+	if (!path)
+		return -ENOMEM;
+
+	for (;;) {
+		char *base;
+
+		base = strrchr(path, '/');
+		if (!base)
+			break;
+		base[0] = '\0';
+		err = dev_rmdir(path);
+		if (err)
+			break;
+	}
+
+	kfree(path);
+	return err;
+}
+
+static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+{
+	/* did we create it */
+	if (inode->i_private != &thread)
+		return 0;
+
+	/* does the dev_t match */
+	if (is_blockdev(dev)) {
+		if (!S_ISBLK(stat->mode))
+			return 0;
+	} else {
+		if (!S_ISCHR(stat->mode))
+			return 0;
+	}
+	if (stat->rdev != dev->devt)
+		return 0;
+
+	/* ours */
+	return 1;
+}
+
+static int handle_remove(const char *nodename, struct device *dev)
+{
+	struct path parent;
+	struct dentry *dentry;
+	int deleted = 0;
+	int err;
+
+	dentry = kern_path_locked(nodename, &parent);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	if (d_really_is_positive(dentry)) {
+		struct kstat stat;
+		struct path p = {.mnt = parent.mnt, .dentry = dentry};
+		err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+				  AT_STATX_SYNC_AS_STAT);
+		if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+			struct iattr newattrs;
+			/*
+			 * before unlinking this node, reset permissions
+			 * of possible references like hardlinks
+			 */
+			newattrs.ia_uid = GLOBAL_ROOT_UID;
+			newattrs.ia_gid = GLOBAL_ROOT_GID;
+			newattrs.ia_mode = stat.mode & ~0777;
+			newattrs.ia_valid =
+				ATTR_UID|ATTR_GID|ATTR_MODE;
+			inode_lock(d_inode(dentry));
+			notify_change(dentry, &newattrs, NULL);
+			inode_unlock(d_inode(dentry));
+			err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
+			if (!err || err == -ENOENT)
+				deleted = 1;
+		}
+	} else {
+		err = -ENOENT;
+	}
+	dput(dentry);
+	inode_unlock(d_inode(parent.dentry));
+
+	path_put(&parent);
+	if (deleted && strchr(nodename, '/'))
+		delete_path(nodename);
+	return err;
+}
+
+/*
+ * If configured, or requested by the commandline, devtmpfs will be
+ * auto-mounted after the kernel has mounted the root filesystem.
+ */
+int devtmpfs_mount(const char *mntdir)
+{
+	int err;
+
+	if (!mount_dev)
+		return 0;
+
+	if (!thread)
+		return 0;
+
+	err = ksys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT,
+			 NULL);
+	if (err)
+		printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+	else
+		printk(KERN_INFO "devtmpfs: mounted\n");
+	return err;
+}
+
+static DECLARE_COMPLETION(setup_done);
+
+static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
+		  struct device *dev)
+{
+	if (mode)
+		return handle_create(name, mode, uid, gid, dev);
+	else
+		return handle_remove(name, dev);
+}
+
+static int devtmpfsd(void *p)
+{
+	char options[] = "mode=0755";
+	int *err = p;
+	*err = ksys_unshare(CLONE_NEWNS);
+	if (*err)
+		goto out;
+	*err = ksys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+	if (*err)
+		goto out;
+	ksys_chdir("/.."); /* will traverse into overmounted root */
+	ksys_chroot(".");
+	complete(&setup_done);
+	while (1) {
+		spin_lock(&req_lock);
+		while (requests) {
+			struct req *req = requests;
+			requests = NULL;
+			spin_unlock(&req_lock);
+			while (req) {
+				struct req *next = req->next;
+				req->err = handle(req->name, req->mode,
+						  req->uid, req->gid, req->dev);
+				complete(&req->done);
+				req = next;
+			}
+			spin_lock(&req_lock);
+		}
+		__set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock(&req_lock);
+		schedule();
+	}
+	return 0;
+out:
+	complete(&setup_done);
+	return *err;
+}
+
+/*
+ * Create the devtmpfs instance; driver-core devices will add their device
+ * nodes here.
+ */
+int __init devtmpfs_init(void)
+{
+	int err = register_filesystem(&dev_fs_type);
+	if (err) {
+		printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
+		       "type %i\n", err);
+		return err;
+	}
+
+	thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
+	if (!IS_ERR(thread)) {
+		wait_for_completion(&setup_done);
+	} else {
+		err = PTR_ERR(thread);
+		thread = NULL;
+	}
+
+	if (err) {
+		printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
+		unregister_filesystem(&dev_fs_type);
+		return err;
+	}
+
+	printk(KERN_INFO "devtmpfs: initialized\n");
+	return 0;
+}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
new file mode 100644
index 0000000..857c8f1
--- /dev/null
+++ b/drivers/base/driver.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver.c - centralized device driver management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "base.h"
+
+static struct device *next_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_next(i);
+	struct device *dev = NULL;
+	struct device_private *dev_prv;
+
+	if (n) {
+		dev_prv = to_device_private_driver(n);
+		dev = dev_prv->device;
+	}
+	return dev;
+}
+
+/**
+ * driver_for_each_device - Iterator for devices bound to a driver.
+ * @drv: Driver we're iterating.
+ * @start: Device to begin with
+ * @data: Data to pass to the callback.
+ * @fn: Function to call for each device.
+ *
+ * Iterate over the @drv's list of devices calling @fn for each one.
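+ *
+ * A sketch of a callback (foo_check_one() is hypothetical); a non-zero
+ * return from @fn stops the iteration and is passed back to the caller:
+ *
+ *	static int foo_check_one(struct device *dev, void *data)
+ *	{
+ *		dev_info(dev, "bound\n");
+ *		return 0;	/* keep iterating */
+ *	}
+ *
+ *	err = driver_for_each_device(drv, NULL, NULL, foo_check_one);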
+ */
+int driver_for_each_device(struct device_driver *drv, struct device *start,
+			   void *data, int (*fn)(struct device *, void *))
+{
+	struct klist_iter i;
+	struct device *dev;
+	int error = 0;
+
+	if (!drv)
+		return -EINVAL;
+
+	klist_iter_init_node(&drv->p->klist_devices, &i,
+			     start ? &start->p->knode_driver : NULL);
+	while (!error && (dev = next_device(&i)))
+		error = fn(dev, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(driver_for_each_device);
+
+/**
+ * driver_find_device - device iterator for locating a particular device.
+ * @drv: The device's driver
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the driver_for_each_device() function above, but
+ * it returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does.  If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
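+ *
+ * An illustrative match callback (names are hypothetical); the reference
+ * taken on the matched device must be dropped by the caller:
+ *
+ *	static int foo_match_name(struct device *dev, void *data)
+ *	{
+ *		return sysfs_streq(dev_name(dev), data);
+ *	}
+ *
+ *	dev = driver_find_device(drv, NULL, (void *)"foo0", foo_match_name);
+ *	if (dev) {
+ *		/* ... use dev ... */
+ *		put_device(dev);
+ *	}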
+ */
+struct device *driver_find_device(struct device_driver *drv,
+				  struct device *start, void *data,
+				  int (*match)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *dev;
+
+	if (!drv || !drv->p)
+		return NULL;
+
+	klist_iter_init_node(&drv->p->klist_devices, &i,
+			     (start ? &start->p->knode_driver : NULL));
+	while ((dev = next_device(&i)))
+		if (match(dev, data) && get_device(dev))
+			break;
+	klist_iter_exit(&i);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(driver_find_device);
+
+/**
+ * driver_create_file - create sysfs file for driver.
+ * @drv: driver.
+ * @attr: driver attribute descriptor.
+ */
+int driver_create_file(struct device_driver *drv,
+		       const struct driver_attribute *attr)
+{
+	int error;
+
+	if (drv)
+		error = sysfs_create_file(&drv->p->kobj, &attr->attr);
+	else
+		error = -EINVAL;
+	return error;
+}
+EXPORT_SYMBOL_GPL(driver_create_file);
+
+/**
+ * driver_remove_file - remove sysfs file for driver.
+ * @drv: driver.
+ * @attr: driver attribute descriptor.
+ */
+void driver_remove_file(struct device_driver *drv,
+			const struct driver_attribute *attr)
+{
+	if (drv)
+		sysfs_remove_file(&drv->p->kobj, &attr->attr);
+}
+EXPORT_SYMBOL_GPL(driver_remove_file);
+
+int driver_add_groups(struct device_driver *drv,
+		      const struct attribute_group **groups)
+{
+	return sysfs_create_groups(&drv->p->kobj, groups);
+}
+
+void driver_remove_groups(struct device_driver *drv,
+			  const struct attribute_group **groups)
+{
+	sysfs_remove_groups(&drv->p->kobj, groups);
+}
+
+/**
+ * driver_register - register driver with bus
+ * @drv: driver to register
+ *
+ * We pass off most of the work to the bus_add_driver() call,
+ * since most of the things we have to do deal with the bus
+ * structures.
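+ *
+ * Rarely called directly; bus-specific wrappers such as
+ * platform_driver_register() fill in @drv->bus and call this. A sketch of
+ * direct use (the "foo" bus is hypothetical):
+ *
+ *	static struct device_driver foo_driver = {
+ *		.name	= "foo",
+ *		.bus	= &foo_bus_type,
+ *	};
+ *
+ *	err = driver_register(&foo_driver);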
+ */
+int driver_register(struct device_driver *drv)
+{
+	int ret;
+	struct device_driver *other;
+
+	if (!drv->bus->p) {
+		pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n",
+			   drv->name, drv->bus->name);
+		return -EINVAL;
+	}
+
+	if ((drv->bus->probe && drv->probe) ||
+	    (drv->bus->remove && drv->remove) ||
+	    (drv->bus->shutdown && drv->shutdown))
+		printk(KERN_WARNING "Driver '%s' needs updating - please use "
+			"bus_type methods\n", drv->name);
+
+	other = driver_find(drv->name, drv->bus);
+	if (other) {
+		printk(KERN_ERR "Error: Driver '%s' is already registered, "
+			"aborting...\n", drv->name);
+		return -EBUSY;
+	}
+
+	ret = bus_add_driver(drv);
+	if (ret)
+		return ret;
+	ret = driver_add_groups(drv, drv->groups);
+	if (ret) {
+		bus_remove_driver(drv);
+		return ret;
+	}
+	kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(driver_register);
+
+/**
+ * driver_unregister - remove driver from system.
+ * @drv: driver.
+ *
+ * Again, we pass off most of the work to the bus-level call.
+ */
+void driver_unregister(struct device_driver *drv)
+{
+	if (!drv || !drv->p) {
+		WARN(1, "Unexpected driver unregister!\n");
+		return;
+	}
+	driver_remove_groups(drv, drv->groups);
+	bus_remove_driver(drv);
+}
+EXPORT_SYMBOL_GPL(driver_unregister);
+
+/**
+ * driver_find - locate driver on a bus by its name.
+ * @name: name of the driver.
+ * @bus: bus to scan for the driver.
+ *
+ * Call kset_find_obj() to iterate over list of drivers on
+ * a bus to find driver by name. Return driver if found.
+ *
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
+ */
+struct device_driver *driver_find(const char *name, struct bus_type *bus)
+{
+	struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
+	struct driver_private *priv;
+
+	if (k) {
+		/* Drop reference added by kset_find_obj() */
+		kobject_put(k);
+		priv = to_driver(k);
+		return priv->driver;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(driver_find);
diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c
new file mode 100644
index 0000000..8dff940
--- /dev/null
+++ b/drivers/base/firmware.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * firmware.c - firmware subsystem hoohaw.
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include "base.h"
+
+struct kobject *firmware_kobj;
+EXPORT_SYMBOL_GPL(firmware_kobj);
+
+int __init firmware_init(void)
+{
+	firmware_kobj = kobject_create_and_add("firmware", NULL);
+	if (!firmware_kobj)
+		return -ENOMEM;
+	return 0;
+}
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
new file mode 100644
index 0000000..eb15d97
--- /dev/null
+++ b/drivers/base/firmware_loader/Kconfig
@@ -0,0 +1,154 @@
+menu "Firmware loader"
+
+config FW_LOADER
+	tristate "Firmware loading facility" if EXPERT
+	default y
+	help
+	  This enables the firmware loading facility in the kernel. The kernel
+	  will first look for built-in firmware, if it has any. Next, it will
+	  look for the requested firmware in a series of filesystem paths:
+
+		o firmware_class path module parameter or kernel boot param
+		o /lib/firmware/updates/UTS_RELEASE
+		o /lib/firmware/updates
+		o /lib/firmware/UTS_RELEASE
+		o /lib/firmware
+
+	  Enabling this feature only increases your kernel image by about
+	  828 bytes; enable this option unless you are certain you don't
+	  need firmware.
+
+	  You typically want this built-in (=y) but you can also enable this
+	  as a module, in which case the firmware_class module will be built.
+	  You also want to be sure to enable this built-in if you are going to
+	  enable built-in firmware (CONFIG_EXTRA_FIRMWARE).
+
+if FW_LOADER
+
+config EXTRA_FIRMWARE
+	string "Build named firmware blobs into the kernel binary"
+	help
+	  Device drivers which require firmware can typically deal with
+	  having the kernel load firmware from the various supported
+	  /lib/firmware/ paths. This option enables you to build firmware
+	  files into the kernel image. Built-in firmware is searched before
+	  any filesystem lookup in the supported /lib/firmware paths
+	  documented under CONFIG_FW_LOADER.
+
+	  This may be useful for testing or if the firmware is required early on
+	  in boot and cannot rely on the firmware being placed in an initrd or
+	  initramfs.
+
+	  This option is a string and takes the (space-separated) names of the
+	  firmware files -- the same names that appear in MODULE_FIRMWARE()
+	  and request_firmware() in the source. These files should exist under
+	  the directory specified by the EXTRA_FIRMWARE_DIR option, which is
+	  /lib/firmware by default.
+
+	  For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
+	  the usb8388.bin file into /lib/firmware, and build the kernel. Then
+	  any request_firmware("usb8388.bin") will be satisfied internally
+	  inside the kernel without ever looking at your filesystem at runtime.
+
+	  WARNING: If you include additional firmware files into your binary
+	  kernel image that are not available under the terms of the GPL,
+	  then it may be a violation of the GPL to distribute the resulting
+	  image since it combines both GPL and non-GPL work. You should
+	  consult a lawyer of your own before distributing such an image.
+
+config EXTRA_FIRMWARE_DIR
+	string "Firmware blobs root directory"
+	depends on EXTRA_FIRMWARE != ""
+	default "/lib/firmware"
+	help
+	  This option controls the directory in which the kernel build system
+	  looks for the firmware files listed in the EXTRA_FIRMWARE option.
+
+config FW_LOADER_USER_HELPER
+	bool "Enable the firmware sysfs fallback mechanism"
+	help
+	  This option enables a sysfs loading facility through which userspace
+	  can load firmware into the kernel as a fallback mechanism
+	  if and only if the kernel's direct filesystem lookup for the
+	  firmware failed using the different /lib/firmware/ paths, or the
+	  path specified in the firmware_class path module parameter, or the
+	  firmware_class path kernel boot parameter if the firmware_class is
+	  built-in. For details on how to work with the sysfs fallback mechanism
+	  refer to Documentation/driver-api/firmware/fallback-mechanisms.rst.
+
+	  The direct filesystem lookup for firmware is always used first now.
+
+	  If the kernel's direct filesystem lookup for firmware fails to find
+	  the requested firmware a sysfs fallback loading facility is made
+	  available and userspace is informed about this through uevents.
+	  The uevent can be suppressed if the driver explicitly requested it;
+	  this is known as the driver using the custom fallback mechanism.
+	  If the custom fallback mechanism is used, userspace must always
+	  acknowledge failure to find firmware, as the timeout for the fallback
+	  mechanism is disabled and failed requests will linger forever.
+
+	  This used to be the default firmware loading facility, and udev used
+	  to listen for uevents to load firmware for the kernel. The firmware
+	  loading functionality in udev has been removed; as such, it can no
+	  longer be relied upon as a fallback mechanism. Linux no longer
+	  relies on or uses a fallback mechanism in userspace. If you need to
+	  rely on one, refer to the permissively licensed firmwared:
+
+	  https://github.com/teg/firmwared
+
+	  Since this was the default firmware loading facility at one point,
+	  old userspace may exist which relies upon it, and as such this
+	  mechanism can never be removed from the kernel.
+
+	  You should only enable this functionality if you are certain you
+	  require a fallback mechanism and have a userspace mechanism ready to
+	  load firmware in case it is not found. One main reason for this may
+	  be if you have drivers which require firmware built-in and for
+	  whatever reason cannot place the required firmware in initramfs.
+	  Another reason kernels may have this feature enabled is to support a
+	  driver which explicitly relies on this fallback mechanism. Only two
+	  drivers need this today:
+
+	    o CONFIG_LEDS_LP55XX_COMMON
+	    o CONFIG_DELL_RBU
+
+	  Outside of supporting the above drivers, another reason for needing
+	  this may be that your firmware resides outside of the paths the kernel
+	  looks in and cannot possibly be specified using the firmware_class
+	  path module parameter or kernel firmware_class path boot parameter
+	  if firmware_class is built-in.
+
+	  A modern use case may be to temporarily mount a custom partition
+	  during provisioning which is only accessible to userspace, and then
+	  to use it to look for and fetch the required firmware. Such driver
+	  functionality may never even be desirable upstream, and as such is
+	  only required to be supported as an interface
+	  for provisioning. Since udev's firmware loading facility has been
+	  removed you can use firmwared or a fork of it to customize how you
+	  want to load firmware based on uevents issued.
+
+	  Enabling this option will increase your kernel image size by about
+	  13436 bytes.
+
+	  If you are unsure about this, say N here, unless you are a Linux
+	  distribution and need to support the above two drivers, or you are
+	  certain you need to support some really custom firmware loading
+	  facility in userspace.
+
+config FW_LOADER_USER_HELPER_FALLBACK
+	bool "Force the firmware sysfs fallback mechanism when possible"
+	depends on FW_LOADER_USER_HELPER
+	help
+	  Enabling this option forces the sysfs userspace fallback mechanism
+	  to be used for all firmware requests which do not explicitly disable
+	  the fallback mechanism. The only firmware call which prohibits the
+	  fallback mechanism is request_firmware_direct(). This option is kept
+	  for backward compatibility, given that the same behavior can also
+	  be enabled by setting the proc sysctl value to true:
+
+	       /proc/sys/kernel/firmware_config/force_sysfs_fallback
+
+	  If you are unsure about this, say N here.
+
+endif # FW_LOADER
+endmenu
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
new file mode 100644
index 0000000..a97eeb0
--- /dev/null
+++ b/drivers/base/firmware_loader/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Linux firmware loader
+
+obj-y			:= fallback_table.o
+obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
+firmware_class-objs := main.o
+firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
new file mode 100644
index 0000000..b5c865f
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/highmem.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+#include <linux/vmalloc.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback mechanism
+ */
+
+extern struct firmware_fallback_config fw_fallback_config;
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+	return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static void __fw_fallback_set_timeout(int timeout)
+{
+	fw_fallback_config.loading_timeout = timeout;
+}
+
+/*
+ * Use a small loading timeout for caching devices' firmware because all these
+ * firmware images have been loaded successfully at least once, and the system
+ * is ready to complete firmware loading now. The maximum size of firmware in
+ * current distributions is about 2M bytes, so 10 secs should be enough.
+ */
+void fw_fallback_set_cache_timeout(void)
+{
+	fw_fallback_config.old_timeout = __firmware_loading_timeout();
+	__fw_fallback_set_timeout(10);
+}
+
+/* Restores the timeout to the value last configured during normal operation */
+void fw_fallback_set_default_timeout(void)
+{
+	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
+}
+
+static long firmware_loading_timeout(void)
+{
+	return __firmware_loading_timeout() > 0 ?
+		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
+}
+
+static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
+{
+	return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
+{
+	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv,  long timeout)
+{
+	return __fw_state_wait_common(fw_priv, timeout);
+}
+
+struct fw_sysfs {
+	bool nowait;
+	struct device dev;
+	struct fw_priv *fw_priv;
+	struct firmware *fw;
+};
+
+static struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+	return container_of(dev, struct fw_sysfs, dev);
+}
+
+static void __fw_load_abort(struct fw_priv *fw_priv)
+{
+	/*
+	 * There is a small window in which user can write to 'loading'
+	 * between loading done and disappearance of 'loading'
+	 */
+	if (fw_sysfs_done(fw_priv))
+		return;
+
+	list_del_init(&fw_priv->pending_list);
+	fw_state_aborted(fw_priv);
+}
+
+static void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+	struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+	__fw_load_abort(fw_priv);
+}
+
+static LIST_HEAD(pending_fw_head);
+
+void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+{
+	struct fw_priv *fw_priv;
+	struct fw_priv *next;
+
+	mutex_lock(&fw_lock);
+	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
+				 pending_list) {
+		if (!fw_priv->need_uevent || !only_kill_custom)
+			__fw_load_abort(fw_priv);
+	}
+	mutex_unlock(&fw_lock);
+}
+
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * firmware_timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ *	Sets the number of seconds to wait for the firmware.  Once
+ *	this expires an error will be returned to the driver and no
+ *	firmware will be provided.
+ *
+ *	Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+	if (tmp_loading_timeout < 0)
+		tmp_loading_timeout = 0;
+
+	__fw_fallback_set_timeout(tmp_loading_timeout);
+
+	return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+	&class_attr_timeout.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static void fw_dev_release(struct device *dev)
+{
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+	kfree(fw_sysfs);
+}
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+		return -ENOMEM;
+	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+		return -ENOMEM;
+	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+	int err = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_sysfs->fw_priv)
+		err = do_firmware_uevent(fw_sysfs, env);
+	mutex_unlock(&fw_lock);
+	return err;
+}
+
+static struct class firmware_class = {
+	.name		= "firmware",
+	.class_groups	= firmware_class_groups,
+	.dev_uevent	= firmware_uevent,
+	.dev_release	= fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+	return class_register(&firmware_class);
+}
+
+void unregister_sysfs_loader(void)
+{
+	class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+	int loading = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_sysfs->fw_priv)
+		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
+	mutex_unlock(&fw_lock);
+
+	return sprintf(buf, "%d\n", loading);
+}
+
+/* a paged firmware buffer should be mapped/unmapped only once */
+static int map_fw_priv_pages(struct fw_priv *fw_priv)
+{
+	if (!fw_priv->is_paged_buf)
+		return 0;
+
+	vunmap(fw_priv->data);
+	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
+			     PAGE_KERNEL_RO);
+	if (!fw_priv->data)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ *	The relevant values are:
+ *
+ *	 1: Start a load, discarding any previous partial load.
+ *	 0: Conclude the load and hand the data to the driver code.
+ *	-1: Conclude the load with an error and discard any written data.
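+ *
+ *	A typical userspace sequence against this interface, as described
+ *	in Documentation/driver-api/firmware/fallback-mechanisms.rst (the
+ *	exact $DEVPATH depends on the device):
+ *
+ *	 echo 1 > /sys/$DEVPATH/loading
+ *	 cat firmware_image > /sys/$DEVPATH/data
+ *	 echo 0 > /sys/$DEVPATH/loading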
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+	struct fw_priv *fw_priv;
+	ssize_t written = count;
+	int loading = simple_strtol(buf, NULL, 10);
+	int i;
+
+	mutex_lock(&fw_lock);
+	fw_priv = fw_sysfs->fw_priv;
+	if (fw_state_is_aborted(fw_priv))
+		goto out;
+
+	switch (loading) {
+	case 1:
+		/* discarding any previous partial load */
+		if (!fw_sysfs_done(fw_priv)) {
+			for (i = 0; i < fw_priv->nr_pages; i++)
+				__free_page(fw_priv->pages[i]);
+			vfree(fw_priv->pages);
+			fw_priv->pages = NULL;
+			fw_priv->page_array_size = 0;
+			fw_priv->nr_pages = 0;
+			fw_state_start(fw_priv);
+		}
+		break;
+	case 0:
+		if (fw_sysfs_loading(fw_priv)) {
+			int rc;
+
+			/*
+			 * Several loading requests may be pending on
+			 * the same firmware buf, so let all requests
+			 * see the mapped 'buf->data' once the loading
+			 * is completed.
+			 */
+			rc = map_fw_priv_pages(fw_priv);
+			if (rc)
+				dev_err(dev, "%s: map pages failed\n",
+					__func__);
+			else
+				rc = security_kernel_post_read_file(NULL,
+						fw_priv->data, fw_priv->size,
+						READING_FIRMWARE);
+
+			/*
+			 * Same logic as fw_load_abort, only the DONE bit
+			 * is ignored and we set ABORT only on failure.
+			 */
+			list_del_init(&fw_priv->pending_list);
+			if (rc) {
+				fw_state_aborted(fw_priv);
+				written = rc;
+			} else {
+				fw_state_done(fw_priv);
+			}
+			break;
+		}
+		/* fallthrough */
+	default:
+		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+		/* fallthrough */
+	case -1:
+		fw_load_abort(fw_sysfs);
+		break;
+	}
+out:
+	mutex_unlock(&fw_lock);
+	return written;
+}
+
+static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+			   loff_t offset, size_t count, bool read)
+{
+	if (read)
+		memcpy(buffer, fw_priv->data + offset, count);
+	else
+		memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+			loff_t offset, size_t count, bool read)
+{
+	while (count) {
+		void *page_data;
+		int page_nr = offset >> PAGE_SHIFT;
+		int page_ofs = offset & (PAGE_SIZE-1);
+		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+		page_data = kmap(fw_priv->pages[page_nr]);
+
+		if (read)
+			memcpy(buffer, page_data + page_ofs, page_cnt);
+		else
+			memcpy(page_data + page_ofs, buffer, page_cnt);
+
+		kunmap(fw_priv->pages[page_nr]);
+		buffer += page_cnt;
+		offset += page_cnt;
+		count -= page_cnt;
+	}
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *bin_attr,
+				  char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+	struct fw_priv *fw_priv;
+	ssize_t ret_count;
+
+	mutex_lock(&fw_lock);
+	fw_priv = fw_sysfs->fw_priv;
+	if (!fw_priv || fw_sysfs_done(fw_priv)) {
+		ret_count = -ENODEV;
+		goto out;
+	}
+	if (offset > fw_priv->size) {
+		ret_count = 0;
+		goto out;
+	}
+	if (count > fw_priv->size - offset)
+		count = fw_priv->size - offset;
+
+	ret_count = count;
+
+	if (fw_priv->data)
+		firmware_rw_data(fw_priv, buffer, offset, count, true);
+	else
+		firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+	mutex_unlock(&fw_lock);
+	return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+	struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
+
+	/* If the array of pages is too small, grow it... */
+	if (fw_priv->page_array_size < pages_needed) {
+		int new_array_size = max(pages_needed,
+					 fw_priv->page_array_size * 2);
+		struct page **new_pages;
+
+		new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
+		if (!new_pages) {
+			fw_load_abort(fw_sysfs);
+			return -ENOMEM;
+		}
+		memcpy(new_pages, fw_priv->pages,
+		       fw_priv->page_array_size * sizeof(void *));
+		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+		       (new_array_size - fw_priv->page_array_size));
+		vfree(fw_priv->pages);
+		fw_priv->pages = new_pages;
+		fw_priv->page_array_size = new_array_size;
+	}
+
+	while (fw_priv->nr_pages < pages_needed) {
+		fw_priv->pages[fw_priv->nr_pages] =
+			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+
+		if (!fw_priv->pages[fw_priv->nr_pages]) {
+			fw_load_abort(fw_sysfs);
+			return -ENOMEM;
+		}
+		fw_priv->nr_pages++;
+	}
+	return 0;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ *	Data written to the 'data' attribute will be later handed to
+ *	the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *bin_attr,
+				   char *buffer, loff_t offset, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+	struct fw_priv *fw_priv;
+	ssize_t retval;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	mutex_lock(&fw_lock);
+	fw_priv = fw_sysfs->fw_priv;
+	if (!fw_priv || fw_sysfs_done(fw_priv)) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	if (fw_priv->data) {
+		if (offset + count > fw_priv->allocated_size) {
+			retval = -ENOMEM;
+			goto out;
+		}
+		firmware_rw_data(fw_priv, buffer, offset, count, false);
+		retval = count;
+	} else {
+		retval = fw_realloc_pages(fw_sysfs, offset + count);
+		if (retval)
+			goto out;
+
+		retval = count;
+		firmware_rw(fw_priv, buffer, offset, count, false);
+	}
+
+	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+	mutex_unlock(&fw_lock);
+	return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+	.attr = { .name = "data", .mode = 0644 },
+	.size = 0,
+	.read = firmware_data_read,
+	.write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+	&dev_attr_loading.attr,
+	NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+	&firmware_attr_data,
+	NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+	.attrs = fw_dev_attrs,
+	.bin_attrs = fw_dev_bin_attrs,
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+	&fw_dev_attr_group,
+	NULL
+};
+
+static struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+		   struct device *device, enum fw_opt opt_flags)
+{
+	struct fw_sysfs *fw_sysfs;
+	struct device *f_dev;
+
+	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+	if (!fw_sysfs) {
+		fw_sysfs = ERR_PTR(-ENOMEM);
+		goto exit;
+	}
+
+	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+	fw_sysfs->fw = firmware;
+	f_dev = &fw_sysfs->dev;
+
+	device_initialize(f_dev);
+	dev_set_name(f_dev, "%s", fw_name);
+	f_dev->parent = device;
+	f_dev->class = &firmware_class;
+	f_dev->groups = fw_dev_attr_groups;
+exit:
+	return fw_sysfs;
+}
+
+/**
+ * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
+ * @fw_sysfs: firmware sysfs information for the firmware to load
+ * @opt_flags: flags of options, FW_OPT_*
+ * @timeout: timeout to wait for the load
+ *
+ * In charge of constructing a sysfs fallback interface for firmware loading.
+ **/
+static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
+				  enum fw_opt opt_flags, long timeout)
+{
+	int retval = 0;
+	struct device *f_dev = &fw_sysfs->dev;
+	struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+	/* fall back on userspace loading */
+	if (!fw_priv->data)
+		fw_priv->is_paged_buf = true;
+
+	dev_set_uevent_suppress(f_dev, true);
+
+	retval = device_add(f_dev);
+	if (retval) {
+		dev_err(f_dev, "%s: device_register failed\n", __func__);
+		goto err_put_dev;
+	}
+
+	mutex_lock(&fw_lock);
+	list_add(&fw_priv->pending_list, &pending_fw_head);
+	mutex_unlock(&fw_lock);
+
+	if (opt_flags & FW_OPT_UEVENT) {
+		fw_priv->need_uevent = true;
+		dev_set_uevent_suppress(f_dev, false);
+		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
+		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
+	} else {
+		timeout = MAX_JIFFY_OFFSET;
+	}
+
+	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
+	if (retval < 0) {
+		mutex_lock(&fw_lock);
+		fw_load_abort(fw_sysfs);
+		mutex_unlock(&fw_lock);
+	}
+
+	if (fw_state_is_aborted(fw_priv)) {
+		if (retval == -ERESTARTSYS)
+			retval = -EINTR;
+		else
+			retval = -EAGAIN;
+	} else if (fw_priv->is_paged_buf && !fw_priv->data)
+		retval = -ENOMEM;
+
+	device_del(f_dev);
+err_put_dev:
+	put_device(f_dev);
+	return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+				    const char *name, struct device *device,
+				    enum fw_opt opt_flags)
+{
+	struct fw_sysfs *fw_sysfs;
+	long timeout;
+	int ret;
+
+	timeout = firmware_loading_timeout();
+	if (opt_flags & FW_OPT_NOWAIT) {
+		timeout = usermodehelper_read_lock_wait(timeout);
+		if (!timeout) {
+			dev_dbg(device, "firmware: %s loading timed out\n",
+				name);
+			return -EBUSY;
+		}
+	} else {
+		ret = usermodehelper_read_trylock();
+		if (WARN_ON(ret)) {
+			dev_err(device, "firmware: %s will not be loaded\n",
+				name);
+			return ret;
+		}
+	}
+
+	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
+	if (IS_ERR(fw_sysfs)) {
+		ret = PTR_ERR(fw_sysfs);
+		goto out_unlock;
+	}
+
+	fw_sysfs->fw_priv = firmware->priv;
+	ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);
+
+	if (!ret)
+		ret = assign_fw(firmware, device, opt_flags);
+
+out_unlock:
+	usermodehelper_read_unlock();
+
+	return ret;
+}
+
+static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
+{
+	if (fw_fallback_config.force_sysfs_fallback)
+		return true;
+	if (!(opt_flags & FW_OPT_USERHELPER))
+		return false;
+	return true;
+}
+
+static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+{
+	int ret;
+
+	if (fw_fallback_config.ignore_sysfs_fallback) {
+		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
+		return false;
+	}
+
+	if (opt_flags & FW_OPT_NOFALLBACK)
+		return false;
+
+	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
+	ret = security_kernel_load_data(LOADING_FIRMWARE);
+	if (ret < 0)
+		return false;	/* a negative errno must not read as true */
+
+	return fw_force_sysfs_fallback(opt_flags);
+}
+
+/**
+ * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
+ * @fw: pointer to firmware image
+ * @name: name of firmware file to look for
+ * @device: device for which firmware is being loaded
+ * @opt_flags: options to control firmware loading behaviour
+ * @ret: return value from direct lookup which triggered the fallback mechanism
+ *
+ * This function is called if the direct lookup for the firmware failed; it
+ * enables a fallback mechanism through userspace by exposing a sysfs loading
+ * interface. Userspace is in charge of loading the firmware through the sysfs
+ * loading interface. This sysfs fallback mechanism may be disabled completely
+ * on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
+ * If this is false, we check whether the internal API caller set the
+ * @FW_OPT_NOFALLBACK flag; if so, that also disables the fallback mechanism.
+ * A system may want to enforce the sysfs fallback mechanism at all times; it
+ * can do this by setting ignore_sysfs_fallback to false and
+ * force_sysfs_fallback to true.
+ * Enabling force_sysfs_fallback is functionally equivalent to building a
+ * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
+ **/
+int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+			    struct device *device,
+			    enum fw_opt opt_flags,
+			    int ret)
+{
+	if (!fw_run_sysfs_fallback(opt_flags))
+		return ret;
+
+	if (!(opt_flags & FW_OPT_NO_WARN))
+		dev_warn(device, "Falling back to sysfs fallback for: %s\n",
+				 name);
+	else
+		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
+				name);
+	return fw_load_from_user_helper(fw, name, device, opt_flags);
+}
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
new file mode 100644
index 0000000..2106350
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_FALLBACK_H
+#define __FIRMWARE_FALLBACK_H
+
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+#include "firmware.h"
+
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * 	as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * 	Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * 	functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism.
+ * 	This emulates the behaviour as if we had set the kernel
+ * 	config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * 	giving up, in seconds.
+ */
+struct firmware_fallback_config {
+	unsigned int force_sysfs_fallback;
+	unsigned int ignore_sysfs_fallback;
+	int old_timeout;
+	int loading_timeout;
+};
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+			    struct device *device,
+			    enum fw_opt opt_flags,
+			    int ret);
+void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+
+void fw_fallback_set_cache_timeout(void);
+void fw_fallback_set_default_timeout(void);
+
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+					  struct device *device,
+					  enum fw_opt opt_flags,
+					  int ret)
+{
+	/* Keep carrying over the same error */
+	return ret;
+}
+
+static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void fw_fallback_set_cache_timeout(void) { }
+static inline void fw_fallback_set_default_timeout(void) { }
+
+static inline int register_sysfs_loader(void)
+{
+	return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+#endif /* __FIRMWARE_FALLBACK_H */
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
new file mode 100644
index 0000000..7428659
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/highmem.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback configuration table
+ */
+
+/* Module or built-in */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
+static unsigned int zero;
+static unsigned int one = 1;
+
+struct firmware_fallback_config fw_fallback_config = {
+	.force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
+	.loading_timeout = 60,
+	.old_timeout = 60,
+};
+EXPORT_SYMBOL_GPL(fw_fallback_config);
+
+struct ctl_table firmware_config_table[] = {
+	{
+		.procname	= "force_sysfs_fallback",
+		.data		= &fw_fallback_config.force_sysfs_fallback,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_douintvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "ignore_sysfs_fallback",
+		.data		= &fw_fallback_config.ignore_sysfs_fallback,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_douintvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{ }
+};
+EXPORT_SYMBOL_GPL(firmware_config_table);
+
+#endif
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
new file mode 100644
index 0000000..4c1395f
--- /dev/null
+++ b/drivers/base/firmware_loader/firmware.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_LOADER_H
+#define __FIRMWARE_LOADER_H
+
+#include <linux/bitops.h>
+#include <linux/firmware.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+
+#include <generated/utsrelease.h>
+
+/**
+ * enum fw_opt - options to control firmware loading behaviour
+ *
+ * @FW_OPT_UEVENT: Enables the fallback mechanism to send a kobject uevent
+ *	when the firmware is not found. Userspace is in charge of loading
+ *	the firmware using the sysfs loading facility.
+ * @FW_OPT_NOWAIT: Used to indicate that the firmware request is asynchronous.
+ * @FW_OPT_USERHELPER: Enable the fallback mechanism, in case the direct
+ *	filesystem lookup fails at finding the firmware.  For details refer to
+ *	firmware_fallback_sysfs().
+ * @FW_OPT_NO_WARN: Quiet, avoid printing warning messages.
+ * @FW_OPT_NOCACHE: Disables firmware caching. Firmware caching is used to
+ *	cache the firmware upon suspend, so that races against the firmware
+ *	file lookup on storage upon resume are avoided. Used for calls where the
+ *	file may be too big, or where the driver takes charge of its own
+ *	firmware caching mechanism.
+ * @FW_OPT_NOFALLBACK: Disable the fallback mechanism. Takes precedence over
+ *	&FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ */
+enum fw_opt {
+	FW_OPT_UEVENT =         BIT(0),
+	FW_OPT_NOWAIT =         BIT(1),
+	FW_OPT_USERHELPER =     BIT(2),
+	FW_OPT_NO_WARN =        BIT(3),
+	FW_OPT_NOCACHE =        BIT(4),
+	FW_OPT_NOFALLBACK =     BIT(5),
+};
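+
+/*
+ * Illustrative sketch: callers OR these bits together; for instance the
+ * async path in main.c builds its flags as
+ *
+ *	enum fw_opt opt_flags = FW_OPT_NOWAIT |
+ *				(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+ */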
+
+enum fw_status {
+	FW_STATUS_UNKNOWN,
+	FW_STATUS_LOADING,
+	FW_STATUS_DONE,
+	FW_STATUS_ABORTED,
+};
+
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized.  struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+	struct completion completion;
+	enum fw_status status;
+};
+
+struct fw_priv {
+	struct kref ref;
+	struct list_head list;
+	struct firmware_cache *fwc;
+	struct fw_state fw_st;
+	void *data;
+	size_t size;
+	size_t allocated_size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	bool is_paged_buf;
+	bool need_uevent;
+	struct page **pages;
+	int nr_pages;
+	int page_array_size;
+	struct list_head pending_list;
+#endif
+	const char *fw_name;
+};
+
+extern struct mutex fw_lock;
+
+static inline bool __fw_state_check(struct fw_priv *fw_priv,
+				    enum fw_status status)
+{
+	struct fw_state *fw_st = &fw_priv->fw_st;
+
+	return fw_st->status == status;
+}
+
+static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
+{
+	struct fw_state *fw_st = &fw_priv->fw_st;
+	long ret;
+
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
+	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+		return -ENOENT;
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return ret < 0 ? ret : 0;
+}
+
+static inline void __fw_state_set(struct fw_priv *fw_priv,
+				  enum fw_status status)
+{
+	struct fw_state *fw_st = &fw_priv->fw_st;
+
+	WRITE_ONCE(fw_st->status, status);
+
+	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+		complete_all(&fw_st->completion);
+}
+
+static inline void fw_state_aborted(struct fw_priv *fw_priv)
+{
+	__fw_state_set(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
+{
+	return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline void fw_state_start(struct fw_priv *fw_priv)
+{
+	__fw_state_set(fw_priv, FW_STATUS_LOADING);
+}
+
+static inline void fw_state_done(struct fw_priv *fw_priv)
+{
+	__fw_state_set(fw_priv, FW_STATUS_DONE);
+}
+
+int assign_fw(struct firmware *fw, struct device *device,
+	      enum fw_opt opt_flags);
+
+#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
new file mode 100644
index 0000000..8e9213b
--- /dev/null
+++ b/drivers/base/firmware_loader/main.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * main.c - Multi purpose firmware loading support
+ *
+ * Copyright (c) 2003 Manuel Estrada Sainz
+ *
+ * Please see Documentation/firmware_class/ for more information.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/highmem.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/async.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/reboot.h>
+#include <linux/security.h>
+
+#include <generated/utsrelease.h>
+
+#include "../base.h"
+#include "firmware.h"
+#include "fallback.h"
+
+MODULE_AUTHOR("Manuel Estrada Sainz");
+MODULE_DESCRIPTION("Multi purpose firmware loading support");
+MODULE_LICENSE("GPL");
+
+struct firmware_cache {
+	/* fw_priv instances will be added into the list below */
+	spinlock_t lock;
+	struct list_head head;
+	int state;
+
+#ifdef CONFIG_PM_SLEEP
+	/*
+	 * Names of firmware images which have been cached successfully
+	 * will be added into the list below so that the device uncache
+	 * helper can track which firmware images have been cached
+	 * before.
+	 */
+	spinlock_t name_lock;
+	struct list_head fw_names;
+
+	struct delayed_work work;
+
+	struct notifier_block   pm_notify;
+#endif
+};
+
+struct fw_cache_entry {
+	struct list_head list;
+	const char *name;
+};
+
+struct fw_name_devm {
+	unsigned long magic;
+	const char *name;
+};
+
+static inline struct fw_priv *to_fw_priv(struct kref *ref)
+{
+	return container_of(ref, struct fw_priv, ref);
+}
+
+#define	FW_LOADER_NO_CACHE	0
+#define	FW_LOADER_START_CACHE	1
+
+/*
+ * fw_lock could be moved to 'struct fw_sysfs' but since it only guards
+ * corner cases a global lock should be OK.
+ */
+DEFINE_MUTEX(fw_lock);
+
+static struct firmware_cache fw_cache;
+
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static void fw_copy_to_prealloc_buf(struct firmware *fw,
+				    void *buf, size_t size)
+{
+	if (!buf || size < fw->size)
+		return;
+	memcpy(buf, fw->data, fw->size);
+}
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
+				    void *buf, size_t size)
+{
+	struct builtin_fw *b_fw;
+
+	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+		if (strcmp(name, b_fw->name) == 0) {
+			fw->size = b_fw->size;
+			fw->data = b_fw->data;
+			fw_copy_to_prealloc_buf(fw, buf, size);
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+	struct builtin_fw *b_fw;
+
+	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+		if (fw->data == b_fw->data)
+			return true;
+
+	return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw,
+					   const char *name, void *buf,
+					   size_t size)
+{
+	return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+	return false;
+}
+#endif
+
+static void fw_state_init(struct fw_priv *fw_priv)
+{
+	struct fw_state *fw_st = &fw_priv->fw_st;
+
+	init_completion(&fw_st->completion);
+	fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline int fw_state_wait(struct fw_priv *fw_priv)
+{
+	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
+}
+
+static int fw_cache_piggyback_on_request(const char *name);
+
+static struct fw_priv *__allocate_fw_priv(const char *fw_name,
+					  struct firmware_cache *fwc,
+					  void *dbuf, size_t size)
+{
+	struct fw_priv *fw_priv;
+
+	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
+	if (!fw_priv)
+		return NULL;
+
+	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
+	if (!fw_priv->fw_name) {
+		kfree(fw_priv);
+		return NULL;
+	}
+
+	kref_init(&fw_priv->ref);
+	fw_priv->fwc = fwc;
+	fw_priv->data = dbuf;
+	fw_priv->allocated_size = size;
+	fw_state_init(fw_priv);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	INIT_LIST_HEAD(&fw_priv->pending_list);
+#endif
+
+	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
+
+	return fw_priv;
+}
+
+static struct fw_priv *__lookup_fw_priv(const char *fw_name)
+{
+	struct fw_priv *tmp;
+	struct firmware_cache *fwc = &fw_cache;
+
+	list_for_each_entry(tmp, &fwc->head, list)
+		if (!strcmp(tmp->fw_name, fw_name))
+			return tmp;
+	return NULL;
+}
+
+/* Returns 1 for batching firmware requests with the same name */
+static int alloc_lookup_fw_priv(const char *fw_name,
+				struct firmware_cache *fwc,
+				struct fw_priv **fw_priv, void *dbuf,
+				size_t size, enum fw_opt opt_flags)
+{
+	struct fw_priv *tmp;
+
+	spin_lock(&fwc->lock);
+	if (!(opt_flags & FW_OPT_NOCACHE)) {
+		tmp = __lookup_fw_priv(fw_name);
+		if (tmp) {
+			kref_get(&tmp->ref);
+			spin_unlock(&fwc->lock);
+			*fw_priv = tmp;
+			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+			return 1;
+		}
+	}
+
+	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
+	if (tmp) {
+		INIT_LIST_HEAD(&tmp->list);
+		if (!(opt_flags & FW_OPT_NOCACHE))
+			list_add(&tmp->list, &fwc->head);
+	}
+	spin_unlock(&fwc->lock);
+
+	*fw_priv = tmp;
+
+	return tmp ? 0 : -ENOMEM;
+}
+
+static void __free_fw_priv(struct kref *ref)
+	__releases(&fwc->lock)
+{
+	struct fw_priv *fw_priv = to_fw_priv(ref);
+	struct firmware_cache *fwc = fw_priv->fwc;
+
+	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+		 (unsigned int)fw_priv->size);
+
+	list_del(&fw_priv->list);
+	spin_unlock(&fwc->lock);
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	if (fw_priv->is_paged_buf) {
+		int i;
+		vunmap(fw_priv->data);
+		for (i = 0; i < fw_priv->nr_pages; i++)
+			__free_page(fw_priv->pages[i]);
+		vfree(fw_priv->pages);
+	} else
+#endif
+	if (!fw_priv->allocated_size)
+		vfree(fw_priv->data);
+	kfree_const(fw_priv->fw_name);
+	kfree(fw_priv);
+}
+
+static void free_fw_priv(struct fw_priv *fw_priv)
+{
+	struct firmware_cache *fwc = fw_priv->fwc;
+	spin_lock(&fwc->lock);
+	if (!kref_put(&fw_priv->ref, __free_fw_priv))
+		spin_unlock(&fwc->lock);
+}
+
+/* direct firmware loading support */
+static char fw_path_para[256];
+static const char * const fw_path[] = {
+	fw_path_para,
+	"/lib/firmware/updates/" UTS_RELEASE,
+	"/lib/firmware/updates",
+	"/lib/firmware/" UTS_RELEASE,
+	"/lib/firmware"
+};
+
+/*
+ * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
+ * kernel command line, because firmware_class is generally built into the
+ * kernel rather than as a module.
+ */
+module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
+MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
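+
+/*
+ * Example (illustrative, the path is made up): booting with
+ *
+ *	firmware_class.path=/vendor/firmware
+ *
+ * makes the loader try /vendor/firmware/<name> before the default paths.
+ * Assuming the usual module-parameter sysfs layout, the same path can
+ * also be changed at runtime:
+ *
+ *	echo -n /vendor/firmware > /sys/module/firmware_class/parameters/path
+ */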
+
+static int
+fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
+{
+	loff_t size;
+	int i, len;
+	int rc = -ENOENT;
+	char *path;
+	enum kernel_read_file_id id = READING_FIRMWARE;
+	size_t msize = INT_MAX;
+
+	/* Already populated data member means we're loading into a buffer */
+	if (fw_priv->data) {
+		id = READING_FIRMWARE_PREALLOC_BUFFER;
+		msize = fw_priv->allocated_size;
+	}
+
+	path = __getname();
+	if (!path)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+		/* skip the unset customized path */
+		if (!fw_path[i][0])
+			continue;
+
+		len = snprintf(path, PATH_MAX, "%s/%s",
+			       fw_path[i], fw_priv->fw_name);
+		if (len >= PATH_MAX) {
+			rc = -ENAMETOOLONG;
+			break;
+		}
+
+		fw_priv->size = 0;
+		rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
+						msize, id);
+		if (rc) {
+			if (rc == -ENOENT)
+				dev_dbg(device, "loading %s failed with error %d\n",
+					 path, rc);
+			else
+				dev_warn(device, "loading %s failed with error %d\n",
+					 path, rc);
+			continue;
+		}
+		dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
+		fw_priv->size = size;
+		fw_state_done(fw_priv);
+		break;
+	}
+	__putname(path);
+
+	return rc;
+}
+
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+	/* Loaded directly? */
+	if (!fw->priv) {
+		vfree(fw->data);
+		return;
+	}
+	free_fw_priv(fw->priv);
+}
+
+/* store the buffer info from fw_priv into the firmware struct */
+static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
+{
+	fw->priv = fw_priv;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	fw->pages = fw_priv->pages;
+#endif
+	fw->size = fw_priv->size;
+	fw->data = fw_priv->data;
+
+	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+		 (unsigned int)fw_priv->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+	struct fw_name_devm *fwn = res;
+
+	if (fwn->magic == (unsigned long)&fw_cache)
+		pr_debug("%s: fw_name-%s devm-%p released\n",
+				__func__, fwn->name, res);
+	kfree_const(fwn->name);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+		void *match_data)
+{
+	struct fw_name_devm *fwn = res;
+
+	return (fwn->magic == (unsigned long)&fw_cache) &&
+		!strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+		const char *name)
+{
+	struct fw_name_devm *fwn;
+
+	fwn = devres_find(dev, fw_name_devm_release,
+			  fw_devm_match, (void *)name);
+	return fwn;
+}
+
+static bool fw_cache_is_setup(struct device *dev, const char *name)
+{
+	struct fw_name_devm *fwn;
+
+	fwn = fw_find_devm_name(dev, name);
+	if (fwn)
+		return true;
+
+	return false;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+	struct fw_name_devm *fwn;
+
+	if (fw_cache_is_setup(dev, name))
+		return 0;
+
+	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
+			   GFP_KERNEL);
+	if (!fwn)
+		return -ENOMEM;
+	fwn->name = kstrdup_const(name, GFP_KERNEL);
+	if (!fwn->name) {
+		devres_free(fwn);
+		return -ENOMEM;
+	}
+
+	fwn->magic = (unsigned long)&fw_cache;
+	devres_add(dev, fwn);
+
+	return 0;
+}
+#else
+static bool fw_cache_is_setup(struct device *dev, const char *name)
+{
+	return false;
+}
+
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+	return 0;
+}
+#endif
+
+int assign_fw(struct firmware *fw, struct device *device,
+	      enum fw_opt opt_flags)
+{
+	struct fw_priv *fw_priv = fw->priv;
+	int ret;
+
+	mutex_lock(&fw_lock);
+	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
+		mutex_unlock(&fw_lock);
+		return -ENOENT;
+	}
+
+	/*
+	 * add firmware name into devres list so that we can auto cache
+	 * and uncache firmware for device.
+	 *
+	 * The device may have been deleted already, but the problem
+	 * should be fixed in devres or driver core.
+	 */
+	/* don't cache firmware handled without uevent */
+	if (device && (opt_flags & FW_OPT_UEVENT) &&
+	    !(opt_flags & FW_OPT_NOCACHE)) {
+		ret = fw_add_devm_name(device, fw_priv->fw_name);
+		if (ret) {
+			mutex_unlock(&fw_lock);
+			return ret;
+		}
+	}
+
+	/*
+	 * After caching firmware image is started, let it piggyback
+	 * on request firmware.
+	 */
+	if (!(opt_flags & FW_OPT_NOCACHE) &&
+	    fw_priv->fwc->state == FW_LOADER_START_CACHE) {
+		if (fw_cache_piggyback_on_request(fw_priv->fw_name))
+			kref_get(&fw_priv->ref);
+	}
+
+	/* pass the pages buffer to driver at the last minute */
+	fw_set_page_data(fw_priv, fw);
+	mutex_unlock(&fw_lock);
+	return 0;
+}
+
+/*
+ * Prepare the firmware and fw_priv structs;
+ * return 0 if a firmware is already assigned, 1 if we need to load one,
+ * or a negative error code.
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+			  struct device *device, void *dbuf, size_t size,
+			  enum fw_opt opt_flags)
+{
+	struct firmware *firmware;
+	struct fw_priv *fw_priv;
+	int ret;
+
+	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+	if (!firmware) {
+		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
+		dev_dbg(device, "using built-in %s\n", name);
+		return 0; /* assigned */
+	}
+
+	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+				  opt_flags);
+
+	/*
+	 * bind with 'priv' now to avoid warning in failure path
+	 * of requesting firmware.
+	 */
+	firmware->priv = fw_priv;
+
+	if (ret > 0) {
+		ret = fw_state_wait(fw_priv);
+		if (!ret) {
+			fw_set_page_data(fw_priv, firmware);
+			return 0; /* assigned */
+		}
+	}
+
+	if (ret < 0)
+		return ret;
+	return 1; /* need to load */
+}
+
+/*
+ * Batched requests need only one wake; we need to do this step last due to
+ * the fallback mechanism. The buffer is protected with kref_get(), and it
+ * won't be released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well; in such cases we just share
+ * the struct fw_priv and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct fw_priv *fw_priv;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	fw_priv = fw->priv;
+	if (!fw_state_is_aborted(fw_priv))
+		fw_state_aborted(fw_priv);
+}
+
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+		  struct device *device, void *buf, size_t size,
+		  enum fw_opt opt_flags)
+{
+	struct firmware *fw = NULL;
+	int ret;
+
+	if (!firmware_p)
+		return -EINVAL;
+
+	if (!name || name[0] == '\0') {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = _request_firmware_prepare(&fw, name, device, buf, size,
+					opt_flags);
+	if (ret <= 0) /* error or already assigned */
+		goto out;
+
+	ret = fw_get_filesystem_firmware(device, fw->priv);
+	if (ret) {
+		if (!(opt_flags & FW_OPT_NO_WARN))
+			dev_warn(device,
+				 "Direct firmware load for %s failed with error %d\n",
+				 name, ret);
+		ret = firmware_fallback_sysfs(fw, name, device, opt_flags, ret);
+	} else
+		ret = assign_fw(fw, device, opt_flags);
+
+ out:
+	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
+		release_firmware(fw);
+		fw = NULL;
+	}
+
+	*firmware_p = fw;
+	return ret;
+}
+
+/**
+ * request_firmware() - send firmware request and wait for it
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ *      @firmware_p will be used to return a firmware image by the name
+ *      of @name for device @device.
+ *
+ *      Should be called from user context where sleeping is allowed.
+ *
+ *      @name will be used as $FIRMWARE in the uevent environment and
+ *      should be distinctive enough not to be confused with any other
+ *      firmware image for this or any other device.
+ *
+ *	Caller must hold the reference count of @device.
+ *
+ *	The function can be called safely inside device's suspend and
+ *	resume callback.
+ **/
+int
+request_firmware(const struct firmware **firmware_p, const char *name,
+		 struct device *device)
+{
+	int ret;
+
+	/* Need to pin this module until return */
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware_p, name, device, NULL, 0,
+				FW_OPT_UEVENT);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL(request_firmware);
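+
+/*
+ * Usage sketch (hypothetical driver; struct mydev, mydev_upload() and the
+ * firmware name are made up for illustration):
+ *
+ *	static int mydev_load_fw(struct mydev *mydev)
+ *	{
+ *		const struct firmware *fw;
+ *		int err;
+ *
+ *		err = request_firmware(&fw, "mydev/mydev.bin", mydev->dev);
+ *		if (err)
+ *			return err;
+ *		err = mydev_upload(mydev, fw->data, fw->size);
+ *		release_firmware(fw);
+ *		return err;
+ *	}
+ */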
+
+/**
+ * firmware_request_nowarn() - request for an optional fw module
+ * @firmware: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function is similar in behaviour to request_firmware(), except
+ * it doesn't produce warning messages when the file is not found.
+ * The sysfs fallback mechanism is enabled if direct filesystem lookup fails;
+ * however, failures to find the firmware file with it are still
+ * suppressed. It is therefore up to the driver to check the return value
+ * of this call and to decide when to inform the users of errors.
+ **/
+int firmware_request_nowarn(const struct firmware **firmware, const char *name,
+			    struct device *device)
+{
+	int ret;
+
+	/* Need to pin this module until return */
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware, name, device, NULL, 0,
+				FW_OPT_UEVENT | FW_OPT_NO_WARN);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_nowarn);
+
+/**
+ * request_firmware_direct() - load firmware directly without usermode helper
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * fall back to the usermode helper even if the firmware couldn't be loaded
+ * directly from the filesystem.  Hence it's useful for loading optional
+ * firmware images, which aren't always present, without the extra-long
+ * timeouts of udev.
+ **/
+int request_firmware_direct(const struct firmware **firmware_p,
+			    const char *name, struct device *device)
+{
+	int ret;
+
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware_p, name, device, NULL, 0,
+				FW_OPT_UEVENT | FW_OPT_NO_WARN |
+				FW_OPT_NOFALLBACK);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_direct);
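+
+/*
+ * Sketch for optional firmware (hypothetical names): since the fallback is
+ * disabled and warnings suppressed, a missing file returns quickly and
+ * quietly, so the driver can simply carry on without it:
+ *
+ *	const struct firmware *patch;
+ *
+ *	if (!request_firmware_direct(&patch, "mydev/patch.bin", dev)) {
+ *		mydev_apply_patch(mydev, patch->data, patch->size);
+ *		release_firmware(patch);
+ *	}
+ */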
+
+/**
+ * firmware_request_cache() - cache firmware for suspend so resume can use it
+ * @name: name of firmware file
+ * @device: device for which the firmware should be cached
+ *
+ * There are some devices with an optimization that enables the device to not
+ * require loading firmware on system reboot. This optimization may still
+ * require the firmware present on resume from suspend. This routine can be
+ * used to ensure the firmware is present on resume from suspend in these
+ * situations. This helper is not compatible with drivers which use
+ * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
+ **/
+int firmware_request_cache(struct device *device, const char *name)
+{
+	int ret;
+
+	mutex_lock(&fw_lock);
+	ret = fw_add_devm_name(device, name);
+	mutex_unlock(&fw_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_cache);
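+
+/*
+ * Sketch (hypothetical name): a device that keeps its firmware across
+ * reboots but still needs the image after resume can register the name
+ * at probe time:
+ *
+ *	err = firmware_request_cache(dev, "mydev/mydev.bin");
+ *
+ * so the PM notifier below caches the image before suspend without the
+ * driver having to call request_firmware() during normal operation.
+ */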
+
+/**
+ * request_firmware_into_buf() - load firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * allocate a buffer to hold the firmware data. Instead, the firmware
+ * is loaded directly into the buffer pointed to by @buf and the @firmware_p
+ * data member is pointed at @buf.
+ *
+ * This function doesn't cache firmware either.
+ */
+int
+request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
+			  struct device *device, void *buf, size_t size)
+{
+	int ret;
+
+	if (fw_cache_is_setup(device, name))
+		return -EOPNOTSUPP;
+
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware_p, name, device, buf, size,
+				FW_OPT_UEVENT | FW_OPT_NOCACHE);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL(request_firmware_into_buf);
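+
+/*
+ * Sketch (hypothetical names and size): loading straight into a
+ * driver-owned DMA buffer, bypassing the firmware cache:
+ *
+ *	void *buf = dma_alloc_coherent(dev, MYDEV_FW_MAX, &dma_addr,
+ *				       GFP_KERNEL);
+ *	const struct firmware *fw;
+ *
+ *	if (buf && !request_firmware_into_buf(&fw, "mydev/mydev.bin", dev,
+ *					      buf, MYDEV_FW_MAX))
+ *		release_firmware(fw);
+ *
+ * On success fw->data points at buf and fw->size holds the real length;
+ * the data stays in the buffer after release_firmware().
+ */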
+
+/**
+ * release_firmware() - release the resource associated with a firmware image
+ * @fw: firmware resource to release
+ **/
+void release_firmware(const struct firmware *fw)
+{
+	if (fw) {
+		if (!fw_is_builtin_firmware(fw))
+			firmware_free_data(fw);
+		kfree(fw);
+	}
+}
+EXPORT_SYMBOL(release_firmware);
+
+/* Async support */
+struct firmware_work {
+	struct work_struct work;
+	struct module *module;
+	const char *name;
+	struct device *device;
+	void *context;
+	void (*cont)(const struct firmware *fw, void *context);
+	enum fw_opt opt_flags;
+};
+
+static void request_firmware_work_func(struct work_struct *work)
+{
+	struct firmware_work *fw_work;
+	const struct firmware *fw;
+
+	fw_work = container_of(work, struct firmware_work, work);
+
+	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
+			  fw_work->opt_flags);
+	fw_work->cont(fw, fw_work->context);
+	put_device(fw_work->device); /* taken in request_firmware_nowait() */
+
+	module_put(fw_work->module);
+	kfree_const(fw_work->name);
+	kfree(fw_work);
+}
+
+/**
+ * request_firmware_nowait() - asynchronous version of request_firmware
+ * @module: module requesting the firmware
+ * @uevent: sends a uevent to copy the firmware image if this flag
+ *	is non-zero; otherwise the firmware copy must be done manually.
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ *	@fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ *	request is over.
+ *
+ *	Caller must hold the reference count of @device.
+ *
+ *	Asynchronous variant of request_firmware() for user contexts:
+ *		- sleeps for as short a period as possible, since sleeping
+ *		  may increase the kernel boot time of built-in device
+ *		  drivers requesting firmware in their ->probe() methods,
+ *		  if @gfp is GFP_KERNEL.
+ *
+ *		- can't sleep at all if @gfp is GFP_ATOMIC.
+ **/
+int
+request_firmware_nowait(
+	struct module *module, bool uevent,
+	const char *name, struct device *device, gfp_t gfp, void *context,
+	void (*cont)(const struct firmware *fw, void *context))
+{
+	struct firmware_work *fw_work;
+
+	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
+	if (!fw_work)
+		return -ENOMEM;
+
+	fw_work->module = module;
+	fw_work->name = kstrdup_const(name, gfp);
+	if (!fw_work->name) {
+		kfree(fw_work);
+		return -ENOMEM;
+	}
+	fw_work->device = device;
+	fw_work->context = context;
+	fw_work->cont = cont;
+	fw_work->opt_flags = FW_OPT_NOWAIT |
+		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+
+	if (!uevent && fw_cache_is_setup(device, name)) {
+		kfree_const(fw_work->name);
+		kfree(fw_work);
+		return -EOPNOTSUPP;
+	}
+
+	if (!try_module_get(module)) {
+		kfree_const(fw_work->name);
+		kfree(fw_work);
+		return -EFAULT;
+	}
+
+	get_device(fw_work->device);
+	INIT_WORK(&fw_work->work, request_firmware_work_func);
+	schedule_work(&fw_work->work);
+	return 0;
+}
+EXPORT_SYMBOL(request_firmware_nowait);
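+
+/*
+ * Async usage sketch (hypothetical continuation and names):
+ *
+ *	static void mydev_fw_cont(const struct firmware *fw, void *context)
+ *	{
+ *		struct mydev *mydev = context;
+ *
+ *		if (fw) {
+ *			mydev_upload(mydev, fw->data, fw->size);
+ *			release_firmware(fw);
+ *		}
+ *	}
+ *
+ *	request_firmware_nowait(THIS_MODULE, true, "mydev/mydev.bin",
+ *				mydev->dev, GFP_KERNEL, mydev, mydev_fw_cont);
+ */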
+
+#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
+/**
+ * cache_firmware() - cache one firmware image in kernel memory space
+ * @fw_name: the firmware image name
+ *
+ * Cache firmware in kernel memory so that drivers can use it when the
+ * system isn't ready for them to request a firmware image from userspace.
+ * Once it returns successfully, a driver can use request_firmware() or its
+ * nowait version to get the cached firmware without any interaction
+ * with userspace.
+ *
+ * Return 0 if the firmware image has been cached successfully
+ * Return !0 otherwise
+ *
+ */
+static int cache_firmware(const char *fw_name)
+{
+	int ret;
+	const struct firmware *fw;
+
+	pr_debug("%s: %s\n", __func__, fw_name);
+
+	ret = request_firmware(&fw, fw_name, NULL);
+	if (!ret)
+		kfree(fw);
+
+	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
+
+	return ret;
+}
+
+static struct fw_priv *lookup_fw_priv(const char *fw_name)
+{
+	struct fw_priv *tmp;
+	struct firmware_cache *fwc = &fw_cache;
+
+	spin_lock(&fwc->lock);
+	tmp = __lookup_fw_priv(fw_name);
+	spin_unlock(&fwc->lock);
+
+	return tmp;
+}
+
+/**
+ * uncache_firmware() - remove one cached firmware image
+ * @fw_name: the firmware image name
+ *
+ * Uncache one firmware image which has been cached successfully
+ * before.
+ *
+ * Return 0 if the firmware cache has been removed successfully
+ * Return !0 otherwise
+ *
+ */
+static int uncache_firmware(const char *fw_name)
+{
+	struct fw_priv *fw_priv;
+	struct firmware fw;
+
+	pr_debug("%s: %s\n", __func__, fw_name);
+
+	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
+		return 0;
+
+	fw_priv = lookup_fw_priv(fw_name);
+	if (fw_priv) {
+		free_fw_priv(fw_priv);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
+{
+	struct fw_cache_entry *fce;
+
+	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
+	if (!fce)
+		goto exit;
+
+	fce->name = kstrdup_const(name, GFP_ATOMIC);
+	if (!fce->name) {
+		kfree(fce);
+		fce = NULL;
+		goto exit;
+	}
+exit:
+	return fce;
+}
+
+static int __fw_entry_found(const char *name)
+{
+	struct firmware_cache *fwc = &fw_cache;
+	struct fw_cache_entry *fce;
+
+	list_for_each_entry(fce, &fwc->fw_names, list) {
+		if (!strcmp(fce->name, name))
+			return 1;
+	}
+	return 0;
+}
+
+static int fw_cache_piggyback_on_request(const char *name)
+{
+	struct firmware_cache *fwc = &fw_cache;
+	struct fw_cache_entry *fce;
+	int ret = 0;
+
+	spin_lock(&fwc->name_lock);
+	if (__fw_entry_found(name))
+		goto found;
+
+	fce = alloc_fw_cache_entry(name);
+	if (fce) {
+		ret = 1;
+		list_add(&fce->list, &fwc->fw_names);
+		pr_debug("%s: fw: %s\n", __func__, name);
+	}
+found:
+	spin_unlock(&fwc->name_lock);
+	return ret;
+}
+
+static void free_fw_cache_entry(struct fw_cache_entry *fce)
+{
+	kfree_const(fce->name);
+	kfree(fce);
+}
+
+static void __async_dev_cache_fw_image(void *fw_entry,
+				       async_cookie_t cookie)
+{
+	struct fw_cache_entry *fce = fw_entry;
+	struct firmware_cache *fwc = &fw_cache;
+	int ret;
+
+	ret = cache_firmware(fce->name);
+	if (ret) {
+		spin_lock(&fwc->name_lock);
+		list_del(&fce->list);
+		spin_unlock(&fwc->name_lock);
+
+		free_fw_cache_entry(fce);
+	}
+}
+
+/* called with dev->devres_lock held */
+static void dev_create_fw_entry(struct device *dev, void *res,
+				void *data)
+{
+	struct fw_name_devm *fwn = res;
+	const char *fw_name = fwn->name;
+	struct list_head *head = data;
+	struct fw_cache_entry *fce;
+
+	fce = alloc_fw_cache_entry(fw_name);
+	if (fce)
+		list_add(&fce->list, head);
+}
+
+static int devm_name_match(struct device *dev, void *res,
+			   void *match_data)
+{
+	struct fw_name_devm *fwn = res;
+	return (fwn->magic == (unsigned long)match_data);
+}
+
+static void dev_cache_fw_image(struct device *dev, void *data)
+{
+	LIST_HEAD(todo);
+	struct fw_cache_entry *fce;
+	struct fw_cache_entry *fce_next;
+	struct firmware_cache *fwc = &fw_cache;
+
+	devres_for_each_res(dev, fw_name_devm_release,
+			    devm_name_match, &fw_cache,
+			    dev_create_fw_entry, &todo);
+
+	list_for_each_entry_safe(fce, fce_next, &todo, list) {
+		list_del(&fce->list);
+
+		spin_lock(&fwc->name_lock);
+		/* only one cache entry for one firmware */
+		if (!__fw_entry_found(fce->name)) {
+			list_add(&fce->list, &fwc->fw_names);
+		} else {
+			free_fw_cache_entry(fce);
+			fce = NULL;
+		}
+		spin_unlock(&fwc->name_lock);
+
+		if (fce)
+			async_schedule_domain(__async_dev_cache_fw_image,
+					      (void *)fce,
+					      &fw_cache_domain);
+	}
+}
+
+static void __device_uncache_fw_images(void)
+{
+	struct firmware_cache *fwc = &fw_cache;
+	struct fw_cache_entry *fce;
+
+	spin_lock(&fwc->name_lock);
+	while (!list_empty(&fwc->fw_names)) {
+		fce = list_entry(fwc->fw_names.next,
+				struct fw_cache_entry, list);
+		list_del(&fce->list);
+		spin_unlock(&fwc->name_lock);
+
+		uncache_firmware(fce->name);
+		free_fw_cache_entry(fce);
+
+		spin_lock(&fwc->name_lock);
+	}
+	spin_unlock(&fwc->name_lock);
+}
+
+/**
+ * device_cache_fw_images() - cache devices' firmware
+ *
+ * If a device has called request_firmware() or its nowait version
+ * successfully before, the firmware names are recorded in the
+ * device's devres list, so device_cache_fw_images() can call
+ * cache_firmware() to cache these firmware images for the device;
+ * the device driver can then load its firmware easily at a
+ * time when the system is not ready to complete firmware loading.
+ */
+static void device_cache_fw_images(void)
+{
+	struct firmware_cache *fwc = &fw_cache;
+	DEFINE_WAIT(wait);
+
+	pr_debug("%s\n", __func__);
+
+	/* cancel uncache work */
+	cancel_delayed_work_sync(&fwc->work);
+
+	fw_fallback_set_cache_timeout();
+
+	mutex_lock(&fw_lock);
+	fwc->state = FW_LOADER_START_CACHE;
+	dpm_for_each_dev(NULL, dev_cache_fw_image);
+	mutex_unlock(&fw_lock);
+
+	/* wait for completion of caching firmware for all devices */
+	async_synchronize_full_domain(&fw_cache_domain);
+
+	fw_fallback_set_default_timeout();
+}
+
+/**
+ * device_uncache_fw_images() - uncache devices' firmware
+ *
+ * uncache all firmware images which have been cached successfully
+ * by device_cache_fw_images() earlier
+ */
+static void device_uncache_fw_images(void)
+{
+	pr_debug("%s\n", __func__);
+	__device_uncache_fw_images();
+}
+
+static void device_uncache_fw_images_work(struct work_struct *work)
+{
+	device_uncache_fw_images();
+}
+
+/**
+ * device_uncache_fw_images_delay() - uncache devices' firmware
+ * @delay: number of milliseconds to delay before uncaching device firmware
+ *
+ * uncache all devices' firmware images which have been cached successfully
+ * by device_cache_fw_images() after @delay milliseconds.
+ */
+static void device_uncache_fw_images_delay(unsigned long delay)
+{
+	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+			   msecs_to_jiffies(delay));
+}
+
+static int fw_pm_notify(struct notifier_block *notify_block,
+			unsigned long mode, void *unused)
+{
+	switch (mode) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+	case PM_RESTORE_PREPARE:
+		/*
+		 * Kill pending fallback requests that use a custom
+		 * fallback, to avoid stalling suspend.
+		 */
+		kill_pending_fw_fallback_reqs(true);
+		device_cache_fw_images();
+		break;
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		/*
+		 * In case that system sleep failed and syscore_suspend is
+		 * not called.
+		 */
+		mutex_lock(&fw_lock);
+		fw_cache.state = FW_LOADER_NO_CACHE;
+		mutex_unlock(&fw_lock);
+
+		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
+		break;
+	}
+
+	return 0;
+}
+
+/* stop caching firmware once syscore_suspend is reached */
+static int fw_suspend(void)
+{
+	fw_cache.state = FW_LOADER_NO_CACHE;
+	return 0;
+}
+
+static struct syscore_ops fw_syscore_ops = {
+	.suspend = fw_suspend,
+};
+
+static int __init register_fw_pm_ops(void)
+{
+	int ret;
+
+	spin_lock_init(&fw_cache.name_lock);
+	INIT_LIST_HEAD(&fw_cache.fw_names);
+
+	INIT_DELAYED_WORK(&fw_cache.work,
+			  device_uncache_fw_images_work);
+
+	fw_cache.pm_notify.notifier_call = fw_pm_notify;
+	ret = register_pm_notifier(&fw_cache.pm_notify);
+	if (ret)
+		return ret;
+
+	register_syscore_ops(&fw_syscore_ops);
+
+	return ret;
+}
+
+static inline void unregister_fw_pm_ops(void)
+{
+	unregister_syscore_ops(&fw_syscore_ops);
+	unregister_pm_notifier(&fw_cache.pm_notify);
+}
+#else
+static int fw_cache_piggyback_on_request(const char *name)
+{
+	return 0;
+}
+static inline int register_fw_pm_ops(void)
+{
+	return 0;
+}
+static inline void unregister_fw_pm_ops(void)
+{
+}
+#endif
+
+static void __init fw_cache_init(void)
+{
+	spin_lock_init(&fw_cache.lock);
+	INIT_LIST_HEAD(&fw_cache.head);
+	fw_cache.state = FW_LOADER_NO_CACHE;
+}
+
+static int fw_shutdown_notify(struct notifier_block *unused1,
+			      unsigned long unused2, void *unused3)
+{
+	/*
+	 * Kill all pending fallback requests to avoid both stalling
+	 * shutdown and a deadlock with the usermode_lock.
+	 */
+	kill_pending_fw_fallback_reqs(false);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+	.notifier_call = fw_shutdown_notify,
+};
+
+static int __init firmware_class_init(void)
+{
+	int ret;
+
+	/* No need to unfold these on exit */
+	fw_cache_init();
+
+	ret = register_fw_pm_ops();
+	if (ret)
+		return ret;
+
+	ret = register_reboot_notifier(&fw_shutdown_nb);
+	if (ret)
+		goto out;
+
+	return register_sysfs_loader();
+
+out:
+	unregister_fw_pm_ops();
+	return ret;
+}
+
+static void __exit firmware_class_exit(void)
+{
+	unregister_fw_pm_ops();
+	unregister_reboot_notifier(&fw_shutdown_nb);
+	unregister_sysfs_loader();
+}
+
+fs_initcall(firmware_class_init);
+module_exit(firmware_class_exit);
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
new file mode 100644
index 0000000..1ce59b4
--- /dev/null
+++ b/drivers/base/hypervisor.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * hypervisor.c - /sys/hypervisor subsystem.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2007 Novell Inc.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include "base.h"
+
+struct kobject *hypervisor_kobj;
+EXPORT_SYMBOL_GPL(hypervisor_kobj);
+
+int __init hypervisor_init(void)
+{
+	hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
+	if (!hypervisor_kobj)
+		return -ENOMEM;
+	return 0;
+}
diff --git a/drivers/base/init.c b/drivers/base/init.c
new file mode 100644
index 0000000..908e652
--- /dev/null
+++ b/drivers/base/init.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+#include <linux/of.h>
+
+#include "base.h"
+
+/**
+ * driver_init - initialize driver model.
+ *
+ * Call the driver model init functions to initialize their
+ * subsystems. Called early from init/main.c.
+ */
+void __init driver_init(void)
+{
+	/* These are the core pieces */
+	devtmpfs_init();
+	devices_init();
+	buses_init();
+	classes_init();
+	firmware_init();
+	hypervisor_init();
+
+	/* These are also core pieces, but must come after the
+	 * core pieces above.
+	 */
+	of_core_init();
+	platform_bus_init();
+	cpu_dev_init();
+	memory_dev_init();
+	container_dev_init();
+}
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
new file mode 100644
index 0000000..2772f5d
--- /dev/null
+++ b/drivers/base/isa.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISA bus.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/isa.h>
+
+static struct device isa_bus = {
+	.init_name	= "isa"
+};
+
+struct isa_dev {
+	struct device dev;
+	struct device *next;
+	unsigned int id;
+};
+
+#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
+
+static int isa_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct isa_driver *isa_driver = to_isa_driver(driver);
+
+	if (dev->platform_data == isa_driver) {
+		if (!isa_driver->match ||
+			isa_driver->match(dev, to_isa_dev(dev)->id))
+			return 1;
+		dev->platform_data = NULL;
+	}
+	return 0;
+}
+
+static int isa_bus_probe(struct device *dev)
+{
+	struct isa_driver *isa_driver = dev->platform_data;
+
+	if (isa_driver && isa_driver->probe)
+		return isa_driver->probe(dev, to_isa_dev(dev)->id);
+
+	return 0;
+}
+
+static int isa_bus_remove(struct device *dev)
+{
+	struct isa_driver *isa_driver = dev->platform_data;
+
+	if (isa_driver && isa_driver->remove)
+		return isa_driver->remove(dev, to_isa_dev(dev)->id);
+
+	return 0;
+}
+
+static void isa_bus_shutdown(struct device *dev)
+{
+	struct isa_driver *isa_driver = dev->platform_data;
+
+	if (isa_driver && isa_driver->shutdown)
+		isa_driver->shutdown(dev, to_isa_dev(dev)->id);
+}
+
+static int isa_bus_suspend(struct device *dev, pm_message_t state)
+{
+	struct isa_driver *isa_driver = dev->platform_data;
+
+	if (isa_driver && isa_driver->suspend)
+		return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
+
+	return 0;
+}
+
+static int isa_bus_resume(struct device *dev)
+{
+	struct isa_driver *isa_driver = dev->platform_data;
+
+	if (isa_driver && isa_driver->resume)
+		return isa_driver->resume(dev, to_isa_dev(dev)->id);
+
+	return 0;
+}
+
+static struct bus_type isa_bus_type = {
+	.name		= "isa",
+	.match		= isa_bus_match,
+	.probe		= isa_bus_probe,
+	.remove		= isa_bus_remove,
+	.shutdown	= isa_bus_shutdown,
+	.suspend	= isa_bus_suspend,
+	.resume		= isa_bus_resume
+};
+
+static void isa_dev_release(struct device *dev)
+{
+	kfree(to_isa_dev(dev));
+}
+
+void isa_unregister_driver(struct isa_driver *isa_driver)
+{
+	struct device *dev = isa_driver->devices;
+
+	while (dev) {
+		struct device *tmp = to_isa_dev(dev)->next;
+		device_unregister(dev);
+		dev = tmp;
+	}
+	driver_unregister(&isa_driver->driver);
+}
+EXPORT_SYMBOL_GPL(isa_unregister_driver);
+
+int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
+{
+	int error;
+	unsigned int id;
+
+	isa_driver->driver.bus	= &isa_bus_type;
+	isa_driver->devices	= NULL;
+
+	error = driver_register(&isa_driver->driver);
+	if (error)
+		return error;
+
+	for (id = 0; id < ndev; id++) {
+		struct isa_dev *isa_dev;
+
+		isa_dev = kzalloc(sizeof *isa_dev, GFP_KERNEL);
+		if (!isa_dev) {
+			error = -ENOMEM;
+			break;
+		}
+
+		isa_dev->dev.parent	= &isa_bus;
+		isa_dev->dev.bus	= &isa_bus_type;
+
+		dev_set_name(&isa_dev->dev, "%s.%u",
+			     isa_driver->driver.name, id);
+		isa_dev->dev.platform_data	= isa_driver;
+		isa_dev->dev.release		= isa_dev_release;
+		isa_dev->id			= id;
+
+		isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24);
+		isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask;
+
+		error = device_register(&isa_dev->dev);
+		if (error) {
+			put_device(&isa_dev->dev);
+			break;
+		}
+
+		if (isa_dev->dev.platform_data) {
+			isa_dev->next = isa_driver->devices;
+			isa_driver->devices = &isa_dev->dev;
+		} else
+			device_unregister(&isa_dev->dev);
+	}
+
+	if (!error && !isa_driver->devices)
+		error = -ENODEV;
+
+	if (error)
+		isa_unregister_driver(isa_driver);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(isa_register_driver);
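+
+/*
+ * Registration sketch (hypothetical driver): probe up to 4 card slots;
+ * ->match decides per id whether a device exists before ->probe runs:
+ *
+ *	static struct isa_driver mycard_isa_driver = {
+ *		.match	= mycard_isa_match,
+ *		.probe	= mycard_isa_probe,
+ *		.remove	= mycard_isa_remove,
+ *		.driver	= { .name = "mycard" },
+ *	};
+ *
+ *	error = isa_register_driver(&mycard_isa_driver, 4);
+ */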
+
+static int __init isa_bus_init(void)
+{
+	int error;
+
+	error = bus_register(&isa_bus_type);
+	if (!error) {
+		error = device_register(&isa_bus);
+		if (error)
+			bus_unregister(&isa_bus_type);
+	}
+	return error;
+}
+
+postcore_initcall(isa_bus_init);
diff --git a/drivers/base/map.c b/drivers/base/map.c
new file mode 100644
index 0000000..5650ab2
--- /dev/null
+++ b/drivers/base/map.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/drivers/base/map.c
+ *
+ * (C) Copyright Al Viro 2002,2003
+ *
+ * NOTE: data structure needs to be changed.  It works, but for large dev_t
+ * it will be too slow.  It is isolated, though, so these changes will be
+ * local to this file.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/kobj_map.h>
+
+struct kobj_map {
+	struct probe {
+		struct probe *next;
+		dev_t dev;
+		unsigned long range;
+		struct module *owner;
+		kobj_probe_t *get;
+		int (*lock)(dev_t, void *);
+		void *data;
+	} *probes[255];
+	struct mutex *lock;
+};
+
+int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
+	     struct module *module, kobj_probe_t *probe,
+	     int (*lock)(dev_t, void *), void *data)
+{
+	unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
+	unsigned index = MAJOR(dev);
+	unsigned i;
+	struct probe *p;
+
+	if (n > 255)
+		n = 255;
+
+	p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++, p++) {
+		p->owner = module;
+		p->get = probe;
+		p->lock = lock;
+		p->dev = dev;
+		p->range = range;
+		p->data = data;
+	}
+	mutex_lock(domain->lock);
+	for (i = 0, p -= n; i < n; i++, p++, index++) {
+		struct probe **s = &domain->probes[index % 255];
+		while (*s && (*s)->range < range)
+			s = &(*s)->next;
+		p->next = *s;
+		*s = p;
+	}
+	mutex_unlock(domain->lock);
+	return 0;
+}
+
+void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range)
+{
+	unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
+	unsigned index = MAJOR(dev);
+	unsigned i;
+	struct probe *found = NULL;
+
+	if (n > 255)
+		n = 255;
+
+	mutex_lock(domain->lock);
+	for (i = 0; i < n; i++, index++) {
+		struct probe **s;
+		for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) {
+			struct probe *p = *s;
+			if (p->dev == dev && p->range == range) {
+				*s = p->next;
+				if (!found)
+					found = p;
+				break;
+			}
+		}
+	}
+	mutex_unlock(domain->lock);
+	kfree(found);
+}
+
+struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index)
+{
+	struct kobject *kobj;
+	struct probe *p;
+	unsigned long best = ~0UL;
+
+retry:
+	mutex_lock(domain->lock);
+	for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) {
+		struct kobject *(*probe)(dev_t, int *, void *);
+		struct module *owner;
+		void *data;
+
+		if (p->dev > dev || p->dev + p->range - 1 < dev)
+			continue;
+		if (p->range - 1 >= best)
+			break;
+		if (!try_module_get(p->owner))
+			continue;
+		owner = p->owner;
+		data = p->data;
+		probe = p->get;
+		best = p->range - 1;
+		*index = dev - p->dev;
+		if (p->lock && p->lock(dev, data) < 0) {
+			module_put(owner);
+			continue;
+		}
+		mutex_unlock(domain->lock);
+		kobj = probe(dev, index, data);
+		/* Currently ->owner protects _only_ ->probe() itself. */
+		module_put(owner);
+		if (kobj)
+			return kobj;
+		goto retry;
+	}
+	mutex_unlock(domain->lock);
+	return NULL;
+}
+
+struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock)
+{
+	struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
+	struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
+	int i;
+
+	if ((p == NULL) || (base == NULL)) {
+		kfree(p);
+		kfree(base);
+		return NULL;
+	}
+
+	base->dev = 1;
+	base->range = ~0;
+	base->get = base_probe;
+	for (i = 0; i < 255; i++)
+		p->probes[i] = base;
+	p->lock = lock;
+	return p;
+}
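+
+/*
+ * Illustrative sketch of how a caller (e.g. the char-dev core) is expected
+ * to use this API; the helper names here are hypothetical:
+ *
+ *	static DEFINE_MUTEX(my_map_lock);
+ *	static struct kobj_map *my_map;
+ *
+ *	my_map = kobj_map_init(base_probe, &my_map_lock);
+ *	kobj_map(my_map, MKDEV(major, 0), 256, THIS_MODULE,
+ *		 exact_match, exact_lock, my_cdev);
+ *	...
+ *	kobj = kobj_lookup(my_map, dev, &index);
+ */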
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
new file mode 100644
index 0000000..817320c
--- /dev/null
+++ b/drivers/base/memory.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory subsystem support
+ *
+ * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
+ *            Dave Hansen <haveblue@us.ibm.com>
+ *
+ * This file provides the necessary infrastructure to represent
+ * a SPARSEMEM-memory-model system's physical memory in /sysfs.
+ * All arch-independent code that assumes MEMORY_HOTPLUG requires
+ * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/topology.h>
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(mem_sysfs_mutex);
+
+#define MEMORY_CLASS_NAME	"memory"
+
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
+static int sections_per_block;
+
+static inline int base_memory_block_id(int section_nr)
+{
+	return section_nr / sections_per_block;
+}
+
+static int memory_subsys_online(struct device *dev);
+static int memory_subsys_offline(struct device *dev);
+
+static struct bus_type memory_subsys = {
+	.name = MEMORY_CLASS_NAME,
+	.dev_name = MEMORY_CLASS_NAME,
+	.online = memory_subsys_online,
+	.offline = memory_subsys_offline,
+};
+
+static BLOCKING_NOTIFIER_HEAD(memory_chain);
+
+int register_memory_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&memory_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_notifier);
+
+void unregister_memory_notifier(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&memory_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_notifier);
+
+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
+static void memory_block_release(struct device *dev)
+{
+	struct memory_block *mem = to_memory_block(dev);
+
+	kfree(mem);
+}
+
+unsigned long __weak memory_block_size_bytes(void)
+{
+	return MIN_MEMORY_BLOCK_SIZE;
+}
+
+static unsigned long get_memory_block_size(void)
+{
+	unsigned long block_sz;
+
+	block_sz = memory_block_size_bytes();
+
+	/* Validate block_sz is a power of 2 and not less than the section size */
+	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
+		WARN_ON(1);
+		block_sz = MIN_MEMORY_BLOCK_SIZE;
+	}
+
+	return block_sz;
+}
+
+/*
+ * Show the memory block index, i.e. this block's first section number
+ * divided by sections_per_block.
+ */
+
+static ssize_t show_mem_start_phys_index(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	unsigned long phys_index;
+
+	phys_index = mem->start_section_nr / sections_per_block;
+	return sprintf(buf, "%08lx\n", phys_index);
+}
+
+/*
+ * Show whether the section of memory is likely to be hot-removable
+ */
+static ssize_t show_mem_removable(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	unsigned long i, pfn;
+	int ret = 1;
+	struct memory_block *mem = to_memory_block(dev);
+
+	if (mem->state != MEM_ONLINE)
+		goto out;
+
+	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
+		pfn = section_nr_to_pfn(mem->start_section_nr + i);
+		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+	}
+
+out:
+	return sprintf(buf, "%d\n", ret);
+}
+
+/*
+ * online, offline, going offline, etc.
+ */
+static ssize_t show_mem_state(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	ssize_t len = 0;
+
+	/*
+	 * We can probably put these states in a nice little array
+	 * so that they're not open-coded
+	 */
+	switch (mem->state) {
+	case MEM_ONLINE:
+		len = sprintf(buf, "online\n");
+		break;
+	case MEM_OFFLINE:
+		len = sprintf(buf, "offline\n");
+		break;
+	case MEM_GOING_OFFLINE:
+		len = sprintf(buf, "going-offline\n");
+		break;
+	default:
+		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
+				mem->state);
+		WARN_ON(1);
+		break;
+	}
+
+	return len;
+}
+
+int memory_notify(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(&memory_chain, val, v);
+}
+
+int memory_isolate_notify(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
+/*
+ * The probe routines leave the pages uninitialized, just as the bootmem code
+ * does. Make sure we do not access them, but instead use only information from
+ * within sections.
+ */
+static bool pages_correctly_probed(unsigned long start_pfn)
+{
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	unsigned long section_nr_end = section_nr + sections_per_block;
+	unsigned long pfn = start_pfn;
+
+	/*
+	 * memmap between sections is not contiguous except with
+	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
+	 * and assume memmap is contiguous within each section
+	 */
+	for (; section_nr < section_nr_end; section_nr++) {
+		if (WARN_ON_ONCE(!pfn_valid(pfn)))
+			return false;
+
+		if (!present_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) not present",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (!valid_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (online_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) is already online",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		}
+		pfn += PAGES_PER_SECTION;
+	}
+
+	return true;
+}
+
+/*
+ * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
+ * OK to have direct references to sparsemem variables in here.
+ * Must already be protected by mem_hotplug_begin().
+ */
+static int
+memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
+{
+	unsigned long start_pfn;
+	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+	int ret;
+
+	start_pfn = section_nr_to_pfn(phys_index);
+
+	switch (action) {
+	case MEM_ONLINE:
+		if (!pages_correctly_probed(start_pfn))
+			return -EBUSY;
+
+		ret = online_pages(start_pfn, nr_pages, online_type);
+		break;
+	case MEM_OFFLINE:
+		ret = offline_pages(start_pfn, nr_pages);
+		break;
+	default:
+		WARN(1, "%s(%ld, %ld) unknown action: %ld\n",
+		     __func__, phys_index, action, action);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int memory_block_change_state(struct memory_block *mem,
+		unsigned long to_state, unsigned long from_state_req)
+{
+	int ret = 0;
+
+	if (mem->state != from_state_req)
+		return -EINVAL;
+
+	if (to_state == MEM_OFFLINE)
+		mem->state = MEM_GOING_OFFLINE;
+
+	ret = memory_block_action(mem->start_section_nr, to_state,
+				mem->online_type);
+
+	mem->state = ret ? from_state_req : to_state;
+
+	return ret;
+}
+
+/* The device lock serializes operations on memory_subsys_[online|offline] */
+static int memory_subsys_online(struct device *dev)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	int ret;
+
+	if (mem->state == MEM_ONLINE)
+		return 0;
+
+	/*
+	 * If we are called from store_mem_state(), online_type will be
+	 * set >= 0.  Otherwise we were called from the device online
+	 * attribute and need to set the online_type.
+	 */
+	if (mem->online_type < 0)
+		mem->online_type = MMOP_ONLINE_KEEP;
+
+	/* Already under protection of mem_hotplug_begin() */
+	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
+
+	/* clear online_type */
+	mem->online_type = -1;
+
+	return ret;
+}
+
+static int memory_subsys_offline(struct device *dev)
+{
+	struct memory_block *mem = to_memory_block(dev);
+
+	if (mem->state == MEM_OFFLINE)
+		return 0;
+
+	/* Can't offline block with non-present sections */
+	if (mem->section_count != sections_per_block)
+		return -EINVAL;
+
+	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+}
+
+static ssize_t
+store_mem_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	int ret, online_type;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	if (sysfs_streq(buf, "online_kernel"))
+		online_type = MMOP_ONLINE_KERNEL;
+	else if (sysfs_streq(buf, "online_movable"))
+		online_type = MMOP_ONLINE_MOVABLE;
+	else if (sysfs_streq(buf, "online"))
+		online_type = MMOP_ONLINE_KEEP;
+	else if (sysfs_streq(buf, "offline"))
+		online_type = MMOP_OFFLINE;
+	else {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
+	 * the correct memory block to online before doing device_online(dev),
+	 * which will take dev->mutex.  Take the lock early to prevent an
+	 * inversion, memory_subsys_online() callbacks will be implemented by
+	 * assuming it's already protected.
+	 */
+	mem_hotplug_begin();
+
+	switch (online_type) {
+	case MMOP_ONLINE_KERNEL:
+	case MMOP_ONLINE_MOVABLE:
+	case MMOP_ONLINE_KEEP:
+		mem->online_type = online_type;
+		ret = device_online(&mem->dev);
+		break;
+	case MMOP_OFFLINE:
+		ret = device_offline(&mem->dev);
+		break;
+	default:
+		ret = -EINVAL; /* should never happen */
+	}
+
+	mem_hotplug_done();
+err:
+	unlock_device_hotplug();
+
+	if (ret < 0)
+		return ret;
+	if (ret)
+		return -EINVAL;
+
+	return count;
+}
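+
+/*
+ * Illustrative state transitions via sysfs, assuming the standard
+ * /sys/devices/system/memory layout (block number is made up):
+ *
+ *	echo offline        > /sys/devices/system/memory/memory32/state
+ *	echo online_movable > /sys/devices/system/memory/memory32/state
+ */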
+
+/*
+ * phys_device is a bad name for this.  What I really want
+ * is a way to differentiate between memory ranges that
+ * are part of physical devices that constitute
+ * a complete removable unit or fru.
+ * i.e. do these ranges belong to the same physical device,
+ * s.t. if I offline all of these sections I can then
+ * remove the physical device?
+ */
+static ssize_t show_phys_device(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	return sprintf(buf, "%d\n", mem->phys_device);
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
+		unsigned long nr_pages, int online_type,
+		struct zone *default_zone)
+{
+	struct zone *zone;
+
+	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+	if (zone != default_zone) {
+		strcat(buf, " ");
+		strcat(buf, zone->name);
+	}
+}
+
+static ssize_t show_valid_zones(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+	unsigned long valid_start_pfn, valid_end_pfn;
+	struct zone *default_zone;
+	int nid;
+
+	/*
+	 * Check the existing zone. Make sure that we do that only on the
+	 * online nodes otherwise the page_zone is not reliable
+	 */
+	if (mem->state == MEM_ONLINE) {
+		/*
+		 * A block that contains more than one zone cannot be
+		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
+		 */
+		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+					  &valid_start_pfn, &valid_end_pfn))
+			return sprintf(buf, "none\n");
+		start_pfn = valid_start_pfn;
+		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+		goto out;
+	}
+
+	nid = mem->nid;
+	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+	strcat(buf, default_zone->name);
+
+	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
+			default_zone);
+	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
+			default_zone);
+out:
+	strcat(buf, "\n");
+
+	return strlen(buf);
+}
+static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
+#endif
+
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
+
+/*
+ * Block size attribute stuff
+ */
+static ssize_t
+print_block_size(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	return sprintf(buf, "%lx\n", get_memory_block_size());
+}
+
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+
+/*
+ * Memory auto online policy.
+ */
+
+static ssize_t
+show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	if (memhp_auto_online)
+		return sprintf(buf, "online\n");
+	else
+		return sprintf(buf, "offline\n");
+}
+
+static ssize_t
+store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	if (sysfs_streq(buf, "online"))
+		memhp_auto_online = true;
+	else if (sysfs_streq(buf, "offline"))
+		memhp_auto_online = false;
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
+		   store_auto_online_blocks);
+
+/*
+ * Some architectures will have custom drivers to do this, and
+ * will not need to do it from userspace.  The fake hot-add code
+ * as well as ppc64 will do all of their discovery in userspace
+ * and will require this interface.
+ */
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+static ssize_t
+memory_probe_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	u64 phys_addr;
+	int nid, ret;
+	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
+
+	ret = kstrtoull(buf, 0, &phys_addr);
+	if (ret)
+		return ret;
+
+	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+		return -EINVAL;
+
+	nid = memory_add_physaddr_to_nid(phys_addr);
+	ret = add_memory(nid, phys_addr,
+			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+
+	if (ret)
+		goto out;
+
+	ret = count;
+out:
+	return ret;
+}
+
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int ret;
+	u64 pfn;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (kstrtoull(buf, 0, &pfn) < 0)
+		return -EINVAL;
+	pfn >>= PAGE_SHIFT;
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+	ret = soft_offline_page(pfn_to_page(pfn), 0);
+	return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int ret;
+	u64 pfn;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (kstrtoull(buf, 0, &pfn) < 0)
+		return -EINVAL;
+	pfn >>= PAGE_SHIFT;
+	ret = memory_failure(pfn, 0);
+	return ret ? ret : count;
+}
+
+static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
+#endif
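+
+/*
+ * A minimal userspace sketch of the soft_offline_page attribute above
+ * (illustrative only).  Note that the store takes a byte-granular
+ * physical address, which the kernel shifts down to a pfn.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+static int soft_offline(unsigned long long phys_addr)
+{
+	char val[32];
+	int fd, n;
+
+	fd = open("/sys/devices/system/memory/soft_offline_page", O_WRONLY);
+	if (fd < 0)
+		return -1;
+	n = snprintf(val, sizeof(val), "0x%llx", phys_addr);
+	if (write(fd, val, n) != n) {
+		close(fd);
+		return -1;
+	}
+	return close(fd);
+}
+#endif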
+
+/*
+ * Note that phys_device is optional.  It is here to allow for
+ * differentiating which *physical* device each section belongs to.
+ */
+int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	return 0;
+}
+
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
+struct memory_block *find_memory_block_hinted(struct mem_section *section,
+					      struct memory_block *hint)
+{
+	int block_id = base_memory_block_id(__section_nr(section));
+	struct device *hintdev = hint ? &hint->dev : NULL;
+	struct device *dev;
+
+	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+	if (hint)
+		put_device(&hint->dev);
+	if (!dev)
+		return NULL;
+	return to_memory_block(dev);
+}
+
+/*
+ * For now, we have a linear search to go find the appropriate
+ * memory_block corresponding to a particular phys_index. If
+ * this gets to be a real problem, we can always use a radix
+ * tree or something here.
+ *
+ * This could be made generic for all device subsystems.
+ */
+struct memory_block *find_memory_block(struct mem_section *section)
+{
+	return find_memory_block_hinted(section, NULL);
+}
+
+static struct attribute *memory_memblk_attrs[] = {
+	&dev_attr_phys_index.attr,
+	&dev_attr_state.attr,
+	&dev_attr_phys_device.attr,
+	&dev_attr_removable.attr,
+#ifdef CONFIG_MEMORY_HOTREMOVE
+	&dev_attr_valid_zones.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group memory_memblk_attr_group = {
+	.attrs = memory_memblk_attrs,
+};
+
+static const struct attribute_group *memory_memblk_attr_groups[] = {
+	&memory_memblk_attr_group,
+	NULL,
+};
+
+/*
+ * register_memory - Setup a sysfs device for a memory block
+ */
+static
+int register_memory(struct memory_block *memory)
+{
+	int ret;
+
+	memory->dev.bus = &memory_subsys;
+	memory->dev.id = memory->start_section_nr / sections_per_block;
+	memory->dev.release = memory_block_release;
+	memory->dev.groups = memory_memblk_attr_groups;
+	memory->dev.offline = memory->state == MEM_OFFLINE;
+
+	ret = device_register(&memory->dev);
+	if (ret)
+		put_device(&memory->dev);
+
+	return ret;
+}
+
+static int init_memory_block(struct memory_block **memory,
+			     struct mem_section *section, unsigned long state)
+{
+	struct memory_block *mem;
+	unsigned long start_pfn;
+	int scn_nr;
+	int ret = 0;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	scn_nr = __section_nr(section);
+	mem->start_section_nr =
+			base_memory_block_id(scn_nr) * sections_per_block;
+	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
+	mem->state = state;
+	start_pfn = section_nr_to_pfn(mem->start_section_nr);
+	mem->phys_device = arch_get_memory_phys_device(start_pfn);
+
+	ret = register_memory(mem);
+
+	*memory = mem;
+	return ret;
+}
+
+static int add_memory_block(int base_section_nr)
+{
+	struct memory_block *mem;
+	int i, ret, section_count = 0, section_nr;
+
+	for (i = base_section_nr;
+	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+	     i++) {
+		if (!present_section_nr(i))
+			continue;
+		if (section_count == 0)
+			section_nr = i;
+		section_count++;
+	}
+
+	if (section_count == 0)
+		return 0;
+	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+	if (ret)
+		return ret;
+	mem->section_count = section_count;
+	return 0;
+}
+
+/*
+ * need an interface for the VM to add new memory regions,
+ * but without onlining them.
+ */
+int hotplug_memory_register(int nid, struct mem_section *section)
+{
+	int ret = 0;
+	struct memory_block *mem;
+
+	mutex_lock(&mem_sysfs_mutex);
+
+	mem = find_memory_block(section);
+	if (mem) {
+		mem->section_count++;
+		put_device(&mem->dev);
+	} else {
+		ret = init_memory_block(&mem, section, MEM_OFFLINE);
+		if (ret)
+			goto out;
+		mem->section_count++;
+	}
+
+out:
+	mutex_unlock(&mem_sysfs_mutex);
+	return ret;
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static void
+unregister_memory(struct memory_block *memory)
+{
+	BUG_ON(memory->dev.bus != &memory_subsys);
+
+	/* drop the ref. we got in remove_memory_section() */
+	put_device(&memory->dev);
+	device_unregister(&memory->dev);
+}
+
+static int remove_memory_section(unsigned long node_id,
+			       struct mem_section *section, int phys_device)
+{
+	struct memory_block *mem;
+
+	mutex_lock(&mem_sysfs_mutex);
+
+	/*
+	 * Some users of the memory hotplug do not want/need memblock to
+	 * track all sections. Skip over those.
+	 */
+	mem = find_memory_block(section);
+	if (!mem)
+		goto out_unlock;
+
+	unregister_mem_sect_under_nodes(mem, __section_nr(section));
+
+	mem->section_count--;
+	if (mem->section_count == 0)
+		unregister_memory(mem);
+	else
+		put_device(&mem->dev);
+
+out_unlock:
+	mutex_unlock(&mem_sysfs_mutex);
+	return 0;
+}
+
+int unregister_memory_section(struct mem_section *section)
+{
+	if (!present_section(section))
+		return -EINVAL;
+
+	return remove_memory_section(0, section, 0);
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+/* return true if the memory block is offlined, otherwise return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+	return mem->state == MEM_OFFLINE;
+}
+
+static struct attribute *memory_root_attrs[] = {
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+	&dev_attr_probe.attr,
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+	&dev_attr_soft_offline_page.attr,
+	&dev_attr_hard_offline_page.attr,
+#endif
+
+	&dev_attr_block_size_bytes.attr,
+	&dev_attr_auto_online_blocks.attr,
+	NULL
+};
+
+static struct attribute_group memory_root_attr_group = {
+	.attrs = memory_root_attrs,
+};
+
+static const struct attribute_group *memory_root_attr_groups[] = {
+	&memory_root_attr_group,
+	NULL,
+};
+
+/*
+ * Initialize the sysfs support for memory devices...
+ */
+int __init memory_dev_init(void)
+{
+	unsigned int i;
+	int ret;
+	int err;
+	unsigned long block_sz;
+
+	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
+	if (ret)
+		goto out;
+
+	block_sz = get_memory_block_size();
+	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+
+	/*
+	 * Create entries for memory sections that were found
+	 * during boot and have been initialized
+	 */
+	mutex_lock(&mem_sysfs_mutex);
+	for (i = 0; i <= __highest_present_section_nr;
+		i += sections_per_block) {
+		err = add_memory_block(i);
+		if (!ret)
+			ret = err;
+	}
+	mutex_unlock(&mem_sysfs_mutex);
+
+out:
+	if (ret)
+		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
+	return ret;
+}
diff --git a/drivers/base/module.c b/drivers/base/module.c
new file mode 100644
index 0000000..46ad4d6
--- /dev/null
+++ b/drivers/base/module.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * module.c - module sysfs fun for drivers
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "base.h"
+
+static char *make_driver_name(struct device_driver *drv)
+{
+	char *driver_name;
+
+	driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
+	if (!driver_name)
+		return NULL;
+
+	return driver_name;
+}
+
+static void module_create_drivers_dir(struct module_kobject *mk)
+{
+	static DEFINE_MUTEX(drivers_dir_mutex);
+
+	mutex_lock(&drivers_dir_mutex);
+	if (mk && !mk->drivers_dir)
+		mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+	mutex_unlock(&drivers_dir_mutex);
+}
+
+void module_add_driver(struct module *mod, struct device_driver *drv)
+{
+	char *driver_name;
+	int no_warn;
+	struct module_kobject *mk = NULL;
+
+	if (!drv)
+		return;
+
+	if (mod)
+		mk = &mod->mkobj;
+	else if (drv->mod_name) {
+		struct kobject *mkobj;
+
+		/* Lookup built-in module entry in /sys/module */
+		mkobj = kset_find_obj(module_kset, drv->mod_name);
+		if (mkobj) {
+			mk = container_of(mkobj, struct module_kobject, kobj);
+			/* remember our module structure */
+			drv->p->mkobj = mk;
+			/* kset_find_obj took a reference */
+			kobject_put(mkobj);
+		}
+	}
+
+	if (!mk)
+		return;
+
+	/* Don't check return codes; these calls are idempotent */
+	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+	driver_name = make_driver_name(drv);
+	if (driver_name) {
+		module_create_drivers_dir(mk);
+		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
+					    driver_name);
+		kfree(driver_name);
+	}
+}
+
+void module_remove_driver(struct device_driver *drv)
+{
+	struct module_kobject *mk = NULL;
+	char *driver_name;
+
+	if (!drv)
+		return;
+
+	sysfs_remove_link(&drv->p->kobj, "module");
+
+	if (drv->owner)
+		mk = &drv->owner->mkobj;
+	else if (drv->p->mkobj)
+		mk = drv->p->mkobj;
+	if (mk && mk->drivers_dir) {
+		driver_name = make_driver_name(drv);
+		if (driver_name) {
+			sysfs_remove_link(mk->drivers_dir, driver_name);
+			kfree(driver_name);
+		}
+	}
+}
diff --git a/drivers/base/node.c b/drivers/base/node.c
new file mode 100644
index 0000000..1ac4c36
--- /dev/null
+++ b/drivers/base/node.c
@@ -0,0 +1,678 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic Node interface support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memory.h>
+#include <linux/vmstat.h>
+#include <linux/notifier.h>
+#include <linux/node.h>
+#include <linux/hugetlb.h>
+#include <linux/compaction.h>
+#include <linux/cpumask.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+
+static struct bus_type node_subsys = {
+	.name = "node",
+	.dev_name = "node",
+};
+
+
+static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
+{
+	ssize_t n;
+	cpumask_var_t mask;
+	struct node *node_dev = to_node(dev);
+
+	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
+	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return 0;
+
+	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+	n = cpumap_print_to_pagebuf(list, buf, mask);
+	free_cpumask_var(mask);
+
+	return n;
+}
+
+static inline ssize_t node_read_cpumask(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return node_read_cpumap(dev, false, buf);
+}
+static inline ssize_t node_read_cpulist(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return node_read_cpumap(dev, true, buf);
+}
+
+static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+static ssize_t node_read_meminfo(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int n;
+	int nid = dev->id;
+	struct pglist_data *pgdat = NODE_DATA(nid);
+	struct sysinfo i;
+
+	si_meminfo_node(&i, nid);
+	n = sprintf(buf,
+		       "Node %d MemTotal:       %8lu kB\n"
+		       "Node %d MemFree:        %8lu kB\n"
+		       "Node %d MemUsed:        %8lu kB\n"
+		       "Node %d Active:         %8lu kB\n"
+		       "Node %d Inactive:       %8lu kB\n"
+		       "Node %d Active(anon):   %8lu kB\n"
+		       "Node %d Inactive(anon): %8lu kB\n"
+		       "Node %d Active(file):   %8lu kB\n"
+		       "Node %d Inactive(file): %8lu kB\n"
+		       "Node %d Unevictable:    %8lu kB\n"
+		       "Node %d Mlocked:        %8lu kB\n",
+		       nid, K(i.totalram),
+		       nid, K(i.freeram),
+		       nid, K(i.totalram - i.freeram),
+		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
+				node_page_state(pgdat, NR_ACTIVE_FILE)),
+		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
+				node_page_state(pgdat, NR_INACTIVE_FILE)),
+		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
+		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
+		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
+		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
+		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
+		       nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
+
+#ifdef CONFIG_HIGHMEM
+	n += sprintf(buf + n,
+		       "Node %d HighTotal:      %8lu kB\n"
+		       "Node %d HighFree:       %8lu kB\n"
+		       "Node %d LowTotal:       %8lu kB\n"
+		       "Node %d LowFree:        %8lu kB\n",
+		       nid, K(i.totalhigh),
+		       nid, K(i.freehigh),
+		       nid, K(i.totalram - i.totalhigh),
+		       nid, K(i.freeram - i.freehigh));
+#endif
+	n += sprintf(buf + n,
+		       "Node %d Dirty:          %8lu kB\n"
+		       "Node %d Writeback:      %8lu kB\n"
+		       "Node %d FilePages:      %8lu kB\n"
+		       "Node %d Mapped:         %8lu kB\n"
+		       "Node %d AnonPages:      %8lu kB\n"
+		       "Node %d Shmem:          %8lu kB\n"
+		       "Node %d KernelStack:    %8lu kB\n"
+		       "Node %d PageTables:     %8lu kB\n"
+		       "Node %d NFS_Unstable:   %8lu kB\n"
+		       "Node %d Bounce:         %8lu kB\n"
+		       "Node %d WritebackTmp:   %8lu kB\n"
+		       "Node %d Slab:           %8lu kB\n"
+		       "Node %d SReclaimable:   %8lu kB\n"
+		       "Node %d SUnreclaim:     %8lu kB\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       "Node %d AnonHugePages:  %8lu kB\n"
+		       "Node %d ShmemHugePages: %8lu kB\n"
+		       "Node %d ShmemPmdMapped: %8lu kB\n"
+#endif
+			,
+		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
+		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
+		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
+		       nid, K(i.sharedram),
+		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
+		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+		       nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE) +
+			      node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
+		       nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE)),
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
+		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
+				       HPAGE_PMD_NR),
+		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
+				       HPAGE_PMD_NR),
+		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+				       HPAGE_PMD_NR));
+#else
+		       nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)));
+#endif
+	n += hugetlb_report_node_meminfo(nid, buf + n);
+	return n;
+}
+
+#undef K
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+
+static ssize_t node_read_numastat(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf,
+		       "numa_hit %lu\n"
+		       "numa_miss %lu\n"
+		       "numa_foreign %lu\n"
+		       "interleave_hit %lu\n"
+		       "local_node %lu\n"
+		       "other_node %lu\n",
+		       sum_zone_numa_state(dev->id, NUMA_HIT),
+		       sum_zone_numa_state(dev->id, NUMA_MISS),
+		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
+		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
+		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
+		       sum_zone_numa_state(dev->id, NUMA_OTHER));
+}
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+
+static ssize_t node_read_vmstat(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int nid = dev->id;
+	struct pglist_data *pgdat = NODE_DATA(nid);
+	int i;
+	int n = 0;
+
+	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
+			     sum_zone_node_page_state(nid, i));
+
+#ifdef CONFIG_NUMA
+	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
+		n += sprintf(buf+n, "%s %lu\n",
+			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+			     sum_zone_numa_state(nid, i));
+#endif
+
+	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+		n += sprintf(buf+n, "%s %lu\n",
+			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
+			     NR_VM_NUMA_STAT_ITEMS],
+			     node_page_state(pgdat, i));
+
+	return n;
+}
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
+static ssize_t node_read_distance(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int nid = dev->id;
+	int len = 0;
+	int i;
+
+	/*
+	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
+	 * at the most (distance + space or newline).
+	 */
+	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
+
+	for_each_online_node(i)
+		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+
+static struct attribute *node_dev_attrs[] = {
+	&dev_attr_cpumap.attr,
+	&dev_attr_cpulist.attr,
+	&dev_attr_meminfo.attr,
+	&dev_attr_numastat.attr,
+	&dev_attr_distance.attr,
+	&dev_attr_vmstat.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(node_dev);
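+
+/*
+ * A minimal userspace sketch of reading one of the per-node attributes
+ * registered above (illustrative only; assumes node0 exists).
+ */
+#if 0
+#include <stdio.h>
+
+static int print_node0_distance(void)
+{
+	char line[256];
+	FILE *f = fopen("/sys/devices/system/node/node0/distance", "r");
+
+	if (!f)
+		return -1;
+	/* one space-separated distance per online node, e.g. "10 21" */
+	if (fgets(line, sizeof(line), f))
+		printf("node0 distances: %s", line);
+	fclose(f);
+	return 0;
+}
+#endif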
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all online nodes with
+ * memory.  It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
+
+static inline bool hugetlb_register_node(struct node *node)
+{
+	if (__hugetlb_register_node &&
+			node_state(node->dev.id, N_MEMORY)) {
+		__hugetlb_register_node(node);
+		return true;
+	}
+	return false;
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+	if (__hugetlb_unregister_node)
+		__hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+				  node_registration_func_t unregister)
+{
+	__hugetlb_register_node   = doregister;
+	__hugetlb_unregister_node = unregister;
+}
+#else
+static inline void hugetlb_register_node(struct node *node) {}
+
+static inline void hugetlb_unregister_node(struct node *node) {}
+#endif
+
+static void node_device_release(struct device *dev)
+{
+	struct node *node = to_node(dev);
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+	/*
+	 * We schedule the work only when a memory section is
+	 * onlined/offlined on this node.  By the time we get here,
+	 * all the memory on this node has been offlined, so no new
+	 * work will be enqueued.
+	 *
+	 * The work item uses node->node_work, so we must flush it
+	 * before freeing the node's memory.
+	 */
+	flush_work(&node->node_work);
+#endif
+	kfree(node);
+}
+
+/*
+ * register_node - Setup a sysfs device for a node.
+ * @num: Node number to use when creating the device.
+ *
+ * Initialize and register the node device.
+ */
+static int register_node(struct node *node, int num)
+{
+	int error;
+
+	node->dev.id = num;
+	node->dev.bus = &node_subsys;
+	node->dev.release = node_device_release;
+	node->dev.groups = node_dev_groups;
+	error = device_register(&node->dev);
+
+	if (error)
+		put_device(&node->dev);
+	else {
+		hugetlb_register_node(node);
+
+		compaction_register_node(node);
+	}
+	return error;
+}
+
+/**
+ * unregister_node - unregister a node device
+ * @node: node going away
+ *
+ * Unregisters a node device @node.  All the devices on the node must be
+ * unregistered before calling this function.
+ */
+void unregister_node(struct node *node)
+{
+	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
+
+	device_unregister(&node->dev);
+}
+
+struct node *node_devices[MAX_NUMNODES];
+
+/*
+ * register cpu under node
+ */
+int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+	int ret;
+	struct device *obj;
+
+	if (!node_online(nid))
+		return 0;
+
+	obj = get_cpu_device(cpu);
+	if (!obj)
+		return 0;
+
+	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
+				&obj->kobj,
+				kobject_name(&obj->kobj));
+	if (ret)
+		return ret;
+
+	return sysfs_create_link(&obj->kobj,
+				 &node_devices[nid]->dev.kobj,
+				 kobject_name(&node_devices[nid]->dev.kobj));
+}
+
+int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+	struct device *obj;
+
+	if (!node_online(nid))
+		return 0;
+
+	obj = get_cpu_device(cpu);
+	if (!obj)
+		return 0;
+
+	sysfs_remove_link(&node_devices[nid]->dev.kobj,
+			  kobject_name(&obj->kobj));
+	sysfs_remove_link(&obj->kobj,
+			  kobject_name(&node_devices[nid]->dev.kobj));
+
+	return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+static int __ref get_nid_for_pfn(unsigned long pfn)
+{
+	if (!pfn_valid_within(pfn))
+		return -1;
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	if (system_state < SYSTEM_RUNNING)
+		return early_pfn_to_nid(pfn);
+#endif
+	return pfn_to_nid(pfn);
+}
+
+/* register memory section under specified node if it spans that node */
+int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+{
+	int ret, nid = *(int *)arg;
+	unsigned long pfn, sect_start_pfn, sect_end_pfn;
+
+	mem_blk->nid = nid;
+
+	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
+	sect_end_pfn += PAGES_PER_SECTION - 1;
+	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+		int page_nid;
+
+		/*
+		 * The memory block may have several absent sections from the
+		 * start.  Skip the pfn range belonging to an absent section.
+		 */
+		if (!pfn_present(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
+		/*
+		 * We need to check if page belongs to nid only for the boot
+		 * case, during hotplug we know that all pages in the memory
+		 * block belong to the same node.
+		 */
+		if (system_state == SYSTEM_BOOTING) {
+			page_nid = get_nid_for_pfn(pfn);
+			if (page_nid < 0)
+				continue;
+			if (page_nid != nid)
+				continue;
+		}
+		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+					&mem_blk->dev.kobj,
+					kobject_name(&mem_blk->dev.kobj));
+		if (ret)
+			return ret;
+
+		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+				&node_devices[nid]->dev.kobj,
+				kobject_name(&node_devices[nid]->dev.kobj));
+	}
+	/* mem section does not span the specified node */
+	return 0;
+}
+
+/* unregister memory section under all nodes that it spans */
+int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+				    unsigned long phys_index)
+{
+	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
+	unsigned long pfn, sect_start_pfn, sect_end_pfn;
+
+	if (!mem_blk) {
+		NODEMASK_FREE(unlinked_nodes);
+		return -EFAULT;
+	}
+	if (!unlinked_nodes)
+		return -ENOMEM;
+	nodes_clear(*unlinked_nodes);
+
+	sect_start_pfn = section_nr_to_pfn(phys_index);
+	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+		int nid;
+
+		nid = get_nid_for_pfn(pfn);
+		if (nid < 0)
+			continue;
+		if (!node_online(nid))
+			continue;
+		if (node_test_and_set(nid, *unlinked_nodes))
+			continue;
+		sysfs_remove_link(&node_devices[nid]->dev.kobj,
+			 kobject_name(&mem_blk->dev.kobj));
+		sysfs_remove_link(&mem_blk->dev.kobj,
+			 kobject_name(&node_devices[nid]->dev.kobj));
+	}
+	NODEMASK_FREE(unlinked_nodes);
+	return 0;
+}
+
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+{
+	return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
+					register_mem_sect_under_node);
+}
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * Handle per node hstate attribute [un]registration on transitions
+ * to/from memoryless state.
+ */
+static void node_hugetlb_work(struct work_struct *work)
+{
+	struct node *node = container_of(work, struct node, node_work);
+
+	/*
+	 * We only get here when a node transitions to/from memoryless state.
+	 * We can detect which transition occurred by examining whether the
+	 * node has memory now.  hugetlb_register_node() already checks this,
+	 * so we try to register the attributes.  If that fails, the node has
+	 * transitioned to memoryless; try to unregister the attributes.
+	 */
+	if (!hugetlb_register_node(node))
+		hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
+}
+
+static int node_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	struct memory_notify *mnb = arg;
+	int nid = mnb->status_change_nid;
+
+	switch (action) {
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+		/*
+		 * offload per node hstate [un]registration to a work thread
+		 * when transitioning to/from memoryless state.
+		 */
+		if (nid != NUMA_NO_NODE)
+			schedule_work(&node_devices[nid]->node_work);
+		break;
+
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+#endif	/* CONFIG_HUGETLBFS */
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
+    !defined(CONFIG_HUGETLBFS)
+static inline int node_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	return NOTIFY_OK;
+}
+
+static void init_node_hugetlb_work(int nid) { }
+
+#endif
+
+int __register_one_node(int nid)
+{
+	int error;
+	int cpu;
+
+	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+	if (!node_devices[nid])
+		return -ENOMEM;
+
+	error = register_node(node_devices[nid], nid);
+
+	/* link cpu under this node */
+	for_each_present_cpu(cpu) {
+		if (cpu_to_node(cpu) == nid)
+			register_cpu_under_node(cpu, nid);
+	}
+
+	/* initialize work queue for memory hot plug */
+	init_node_hugetlb_work(nid);
+
+	return error;
+}
+
+void unregister_one_node(int nid)
+{
+	if (!node_devices[nid])
+		return;
+
+	unregister_node(node_devices[nid]);
+	node_devices[nid] = NULL;
+}
+
+/*
+ * node states attributes
+ */
+
+static ssize_t print_nodes_state(enum node_states state, char *buf)
+{
+	int n;
+
+	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+		      nodemask_pr_args(&node_states[state]));
+	buf[n++] = '\n';
+	buf[n] = '\0';
+	return n;
+}
+
+struct node_attr {
+	struct device_attribute attr;
+	enum node_states state;
+};
+
+static ssize_t show_node_state(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct node_attr *na = container_of(attr, struct node_attr, attr);
+	return print_nodes_state(na->state, buf);
+}
+
+#define _NODE_ATTR(name, state) \
+	{ __ATTR(name, 0444, show_node_state, NULL), state }
+
+static struct node_attr node_state_attr[] = {
+	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
+	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
+	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
+#ifdef CONFIG_HIGHMEM
+	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
+#endif
+	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
+};
+
+static struct attribute *node_state_attrs[] = {
+	&node_state_attr[N_POSSIBLE].attr.attr,
+	&node_state_attr[N_ONLINE].attr.attr,
+	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
+#ifdef CONFIG_HIGHMEM
+	&node_state_attr[N_HIGH_MEMORY].attr.attr,
+#endif
+	&node_state_attr[N_MEMORY].attr.attr,
+	&node_state_attr[N_CPU].attr.attr,
+	NULL
+};
+
+static struct attribute_group memory_root_attr_group = {
+	.attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+	&memory_root_attr_group,
+	NULL,
+};
+
+#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
+static int __init register_node_type(void)
+{
+	int ret;
+
+	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
+	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
+
+	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
+	if (!ret) {
+		static struct notifier_block node_memory_callback_nb = {
+			.notifier_call = node_memory_callback,
+			.priority = NODE_CALLBACK_PRI,
+		};
+		register_hotmemory_notifier(&node_memory_callback_nb);
+	}
+
+	/*
+	 * Note:  we're not going to unregister the node class if we fail
+	 * to register the node state class attribute files.
+	 */
+	return ret;
+}
+postcore_initcall(register_node_type);
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 0000000..c228644
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+	int ret;
+
+	if (dev->of_node_reused)
+		return 0;
+
+	dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+	if (!dev->pins)
+		return -ENOMEM;
+
+	dev->pins->p = devm_pinctrl_get(dev);
+	if (IS_ERR(dev->pins->p)) {
+		dev_dbg(dev, "no pinctrl handle\n");
+		ret = PTR_ERR(dev->pins->p);
+		goto cleanup_alloc;
+	}
+
+	dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+					PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(dev->pins->default_state)) {
+		dev_dbg(dev, "no default pinctrl state\n");
+		ret = 0;
+		goto cleanup_get;
+	}
+
+	dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
+					PINCTRL_STATE_INIT);
+	if (IS_ERR(dev->pins->init_state)) {
+		/* Not supplying this state is perfectly legal */
+		dev_dbg(dev, "no init pinctrl state\n");
+
+		ret = pinctrl_select_state(dev->pins->p,
+					   dev->pins->default_state);
+	} else {
+		ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
+	}
+
+	if (ret) {
+		dev_dbg(dev, "failed to activate initial pinctrl state\n");
+		goto cleanup_get;
+	}
+
+#ifdef CONFIG_PM
+	/*
+	 * If power management is enabled, we also look for the optional
+	 * sleep and idle pin states, with semantics as defined in
+	 * <linux/pinctrl/pinctrl-state.h>
+	 */
+	dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
+					PINCTRL_STATE_SLEEP);
+	if (IS_ERR(dev->pins->sleep_state))
+		/* Not supplying this state is perfectly legal */
+		dev_dbg(dev, "no sleep pinctrl state\n");
+
+	dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
+					PINCTRL_STATE_IDLE);
+	if (IS_ERR(dev->pins->idle_state))
+		/* Not supplying this state is perfectly legal */
+		dev_dbg(dev, "no idle pinctrl state\n");
+#endif
+
+	return 0;
+
+	/*
+	 * If no pinctrl handle or default state was found for this device,
+	 * let's explicitly free the pin container in the device, there is
+	 * no point in keeping it around.
+	 */
+cleanup_get:
+	devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+	devm_kfree(dev, dev->pins);
+	dev->pins = NULL;
+
+	/* Return deferrals */
+	if (ret == -EPROBE_DEFER)
+		return ret;
+	/* Return serious errors */
+	if (ret == -EINVAL)
+		return ret;
+	/* We ignore errors like -ENOENT meaning no pinctrl state */
+
+	return 0;
+}
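+
+/*
+ * A minimal sketch of how a driver can use the sleep/default states
+ * looked up above (illustrative only; the my_dev_* names are
+ * hypothetical).  The helpers below come from
+ * <linux/pinctrl/consumer.h> and operate on the dev->pins container
+ * populated by pinctrl_bind_pins().
+ */
+#if 0
+static int my_dev_suspend(struct device *dev)
+{
+	/* move pins to their (optional) sleep state */
+	return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int my_dev_resume(struct device *dev)
+{
+	/* restore the default pin state on the way back up */
+	return pinctrl_pm_select_default_state(dev);
+}
+#endif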
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
new file mode 100644
index 0000000..60d6cc6
--- /dev/null
+++ b/drivers/base/platform-msi.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT	21
+#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+	struct device		*dev;
+	void			*host_data;
+	msi_alloc_info_t	arg;
+	irq_write_msi_msg_t	write_msg;
+	int			devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+	u32 devid;
+
+	devid = desc->platform.msi_priv_data->devid;
+
+	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+			     struct msi_domain_info *info,
+			     unsigned int virq, irq_hw_number_t hwirq,
+			     msi_alloc_info_t *arg)
+{
+	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					     info->chip, info->chip_data);
+}
+#else
+#define platform_msi_set_desc		NULL
+#define platform_msi_init		NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	BUG_ON(!ops);
+
+	if (ops->msi_init == NULL)
+		ops->msi_init = platform_msi_init;
+	if (ops->set_desc == NULL)
+		ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+	struct platform_msi_priv_data *priv_data;
+
+	priv_data = desc->platform.msi_priv_data;
+
+	priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_mask)
+		chip->irq_mask = irq_chip_mask_parent;
+	if (!chip->irq_unmask)
+		chip->irq_unmask = irq_chip_unmask_parent;
+	if (!chip->irq_eoi)
+		chip->irq_eoi = irq_chip_eoi_parent;
+	if (!chip->irq_set_affinity)
+		chip->irq_set_affinity = msi_domain_set_affinity;
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = platform_msi_write_msg;
+	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
+		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
+		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+}
+
+static void platform_msi_free_descs(struct device *dev, int base, int nvec)
+{
+	struct msi_desc *desc, *tmp;
+
+	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+		if (desc->platform.msi_index >= base &&
+		    desc->platform.msi_index < (base + nvec)) {
+			list_del(&desc->list);
+			free_msi_entry(desc);
+		}
+	}
+}
+
+static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
+					     int nvec,
+					     struct platform_msi_priv_data *data)
+
+{
+	struct msi_desc *desc;
+	int i, base = 0;
+
+	if (!list_empty(dev_to_msi_list(dev))) {
+		desc = list_last_entry(dev_to_msi_list(dev),
+				       struct msi_desc, list);
+		base = desc->platform.msi_index + 1;
+	}
+
+	for (i = 0; i < nvec; i++) {
+		desc = alloc_msi_entry(dev, 1, NULL);
+		if (!desc)
+			break;
+
+		desc->platform.msi_priv_data = data;
+		desc->platform.msi_index = base + i;
+		desc->irq = virq ? virq + i : 0;
+
+		list_add_tail(&desc->list, dev_to_msi_list(dev));
+	}
+
+	if (i != nvec) {
+		/* Clean up the mess */
+		platform_msi_free_descs(dev, base, nvec);
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+				    struct platform_msi_priv_data *data)
+
+{
+	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @fwnode:		Optional fwnode of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
+						  struct msi_domain_info *info,
+						  struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		platform_msi_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		platform_msi_update_chip_ops(info);
+
+	domain = msi_create_irq_domain(fwnode, info, parent);
+	if (domain)
+		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
+
+	return domain;
+}
+
+static struct platform_msi_priv_data *
+platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+			     irq_write_msi_msg_t write_msi_msg)
+{
+	struct platform_msi_priv_data *datap;
+	/*
+	 * Limit the number of interrupts to 2048 per device. Should we
+	 * need to bump this up, DEV_ID_SHIFT should be adjusted
+	 * accordingly (which would impact the max number of MSI
+	 * capable devices).
+	 */
+	if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+		return ERR_PTR(-EINVAL);
+
+	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+		dev_err(dev, "Incompatible msi_domain, giving up\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Already had a helping of MSI? Greed... */
+	if (!list_empty(dev_to_msi_list(dev)))
+		return ERR_PTR(-EBUSY);
+
+	datap = kzalloc(sizeof(*datap), GFP_KERNEL);
+	if (!datap)
+		return ERR_PTR(-ENOMEM);
+
+	datap->devid = ida_simple_get(&platform_msi_devid_ida,
+				      0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+	if (datap->devid < 0) {
+		int err = datap->devid;
+		kfree(datap);
+		return ERR_PTR(err);
+	}
+
+	datap->write_msg = write_msi_msg;
+	datap->dev = dev;
+
+	return datap;
+}
+
+static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
+{
+	ida_simple_remove(&platform_msi_devid_ida, data->devid);
+	kfree(data);
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev:		The device for which to allocate interrupts
+ * @nvec:		The number of interrupts to allocate
+ * @write_msi_msg:	Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+				   irq_write_msi_msg_t write_msi_msg)
+{
+	struct platform_msi_priv_data *priv_data;
+	int err;
+
+	priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+	if (IS_ERR(priv_data))
+		return PTR_ERR(priv_data);
+
+	err = platform_msi_alloc_descs(dev, nvec, priv_data);
+	if (err)
+		goto out_free_priv_data;
+
+	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+	if (err)
+		goto out_free_desc;
+
+	return 0;
+
+out_free_desc:
+	platform_msi_free_descs(dev, 0, nvec);
+out_free_priv_data:
+	platform_msi_free_priv_data(priv_data);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev:	The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+	if (!list_empty(dev_to_msi_list(dev))) {
+		struct msi_desc *desc;
+
+		desc = first_msi_entry(dev);
+		platform_msi_free_priv_data(desc->platform.msi_priv_data);
+	}
+
+	msi_domain_free_irqs(dev->msi_domain, dev);
+	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+}
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
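+
+/*
+ * A minimal client-driver sketch of the two calls above (illustrative
+ * only; my_write_msg()/my_handler() are hypothetical names, and the
+ * unwinding of earlier request_irq() calls on error is omitted for
+ * brevity).  request_irq() and irqreturn_t come from
+ * <linux/interrupt.h>.
+ */
+#if 0
+static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	/* program msg->address_hi/address_lo/data into doorbell registers */
+}
+
+static irqreturn_t my_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+static int my_setup_msis(struct device *dev)
+{
+	struct msi_desc *desc;
+	int err;
+
+	err = platform_msi_domain_alloc_irqs(dev, 4, my_write_msg);
+	if (err)
+		return err;
+
+	for_each_msi_entry(desc, dev) {
+		err = request_irq(desc->irq, my_handler, 0, "my-dev", dev);
+		if (err) {
+			platform_msi_domain_free_irqs(dev);
+			return err;
+		}
+	}
+	return 0;
+}
+#endif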
+
+/**
+ * platform_msi_get_host_data - Query the private data associated with
+ *                              a platform-msi domain
+ * @domain:	The platform-msi domain
+ *
+ * Returns the private data provided when calling
+ * platform_msi_create_device_domain.
+ */
+void *platform_msi_get_host_data(struct irq_domain *domain)
+{
+	struct platform_msi_priv_data *data = domain->host_data;
+	return data->host_data;
+}
+
+/**
+ * platform_msi_create_device_domain - Create a platform-msi domain
+ *
+ * @dev:		The device generating the MSIs
+ * @nvec:		The number of MSIs that need to be allocated
+ * @write_msi_msg:	Callback to write an interrupt message for @dev
+ * @ops:		The hierarchy domain operations to use
+ * @host_data:		Private data associated with this domain
+ *
+ * Returns an irqdomain for @nvec interrupts
+ */
+struct irq_domain *
+platform_msi_create_device_domain(struct device *dev,
+				  unsigned int nvec,
+				  irq_write_msi_msg_t write_msi_msg,
+				  const struct irq_domain_ops *ops,
+				  void *host_data)
+{
+	struct platform_msi_priv_data *data;
+	struct irq_domain *domain;
+	int err;
+
+	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+	if (IS_ERR(data))
+		return NULL;
+
+	data->host_data = host_data;
+	domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec,
+					     dev->fwnode, ops, data);
+	if (!domain)
+		goto free_priv;
+
+	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
+	if (err)
+		goto free_domain;
+
+	return domain;
+
+free_domain:
+	irq_domain_remove(domain);
+free_priv:
+	platform_msi_free_priv_data(data);
+	return NULL;
+}
+
+/**
+ * platform_msi_domain_free - Free interrupts associated with a platform-msi
+ *                            domain
+ *
+ * @domain:	The platform-msi domain
+ * @virq:	The base irq from which to perform the free operation
+ * @nvec:	How many interrupts to free from @virq
+ */
+void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+			      unsigned int nvec)
+{
+	struct platform_msi_priv_data *data = domain->host_data;
+	struct msi_desc *desc;
+	for_each_msi_entry(desc, data->dev) {
+		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
+			return;
+		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
+			continue;
+
+		irq_domain_free_irqs_common(domain, desc->irq, 1);
+	}
+}
+
+/**
+ * platform_msi_domain_alloc - Allocate interrupts associated with
+ *			       a platform-msi domain
+ *
+ * @domain:	The platform-msi domain
+ * @virq:	The base irq from which to perform the allocate operation
+ * @nr_irqs:	How many interrupts to allocate from @virq
+ *
+ * Return 0 on success, or an error code on failure. Must be called
+ * with irq_domain_mutex held (which can only be done as part of a
+ * top-level interrupt allocation).
+ */
+int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+			      unsigned int nr_irqs)
+{
+	struct platform_msi_priv_data *data = domain->host_data;
+	int err;
+
+	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
+	if (err)
+		return err;
+
+	err = msi_domain_populate_irqs(domain->parent, data->dev,
+				       virq, nr_irqs, &data->arg);
+	if (err)
+		platform_msi_domain_free(domain, virq, nr_irqs);
+
+	return err;
+}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
new file mode 100644
index 0000000..dff82a3
--- /dev/null
+++ b/drivers/base/platform.c
@@ -0,0 +1,1488 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * platform.c - platform 'pseudo' bus for legacy devices
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ *
+ * Please see Documentation/driver-model/platform.txt for more
+ * information.
+ */
+
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/idr.h>
+#include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/limits.h>
+#include <linux/property.h>
+
+#include "base.h"
+#include "power/power.h"
+
+/* For automatically allocated device IDs */
+static DEFINE_IDA(platform_devid_ida);
+
+struct device platform_bus = {
+	.init_name	= "platform",
+};
+EXPORT_SYMBOL_GPL(platform_bus);
+
+/**
+ * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
+ * @pdev: platform device
+ *
+ * This is called before platform_device_add() such that any pdev_archdata may
+ * be setup before the platform_notifier is called.  So if a user needs to
+ * manipulate any relevant information in the pdev_archdata they can do:
+ *
+ *	platform_device_alloc()
+ *	... manipulate ...
+ *	platform_device_add()
+ *
+ * And if they don't care they can just call platform_device_register() and
+ * everything will just work out.
+ */
+void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+}
+
+/**
+ * platform_get_resource - get a resource for a device
+ * @dev: platform device
+ * @type: resource type
+ * @num: resource index
+ */
+struct resource *platform_get_resource(struct platform_device *dev,
+				       unsigned int type, unsigned int num)
+{
+	int i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (type == resource_type(r) && num-- == 0)
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_resource);
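+
+/*
+ * A minimal sketch of the canonical use of platform_get_resource() in a
+ * driver probe (illustrative only; my_probe() is a hypothetical name).
+ */
+#if 0
+static int my_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *base;
+
+	/* first (index 0) memory resource of the device */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	return 0;
+}
+#endif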
+
+/**
+ * platform_get_irq - get an IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ */
+int platform_get_irq(struct platform_device *dev, unsigned int num)
+{
+#ifdef CONFIG_SPARC
+	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+	if (!dev || num >= dev->archdata.num_irqs)
+		return -ENXIO;
+	return dev->archdata.irqs[num];
+#else
+	struct resource *r;
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get(dev->dev.of_node, num);
+		if (ret > 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
+
+	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+	if (has_acpi_companion(&dev->dev)) {
+		if (r && r->flags & IORESOURCE_DISABLED) {
+			int ret;
+
+			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+			if (ret)
+				return ret;
+		}
+	}
+
+	/*
+	 * The resources may pass trigger flags to the irqs that need
+	 * to be set up. It so happens that the trigger flags for
+	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
+	 * settings.
+	 */
+	if (r && r->flags & IORESOURCE_BITS) {
+		struct irq_data *irqd;
+
+		irqd = irq_get_irq_data(r->start);
+		if (!irqd)
+			return -ENXIO;
+		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
+	}
+
+	return r ? r->start : -ENXIO;
+#endif
+}
+EXPORT_SYMBOL_GPL(platform_get_irq);
+
+/**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+	int ret, nr = 0;
+
+	while ((ret = platform_get_irq(dev, nr)) >= 0)
+		nr++;
+
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
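+
+/*
+ * A minimal sketch of requesting a platform device's first IRQ in probe
+ * (illustrative only; my_irq_handler() is a hypothetical name).  Note
+ * that -EPROBE_DEFER must be passed up, not swallowed.
+ */
+#if 0
+static irqreturn_t my_irq_handler(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+static int my_irq_setup(struct platform_device *pdev)
+{
+	int irq = platform_get_irq(pdev, 0);
+
+	if (irq < 0)
+		return irq;
+
+	return devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
+				dev_name(&pdev->dev), pdev);
+}
+#endif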
+
+/**
+ * platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *platform_get_resource_byname(struct platform_device *dev,
+					      unsigned int type,
+					      const char *name)
+{
+	int i;
+
+	for (i = 0; i < dev->num_resources; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (unlikely(!r->name))
+			continue;
+
+		if (type == resource_type(r) && !strcmp(r->name, name))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_resource_byname);
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+	struct resource *r;
+
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get_byname(dev->dev.of_node, name);
+		if (ret > 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
+
+	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
+	return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname);
+
+/**
+ * platform_add_devices - add a number of platform devices
+ * @devs: array of platform devices to add
+ * @num: number of platform devices in array
+ */
+int platform_add_devices(struct platform_device **devs, int num)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < num; i++) {
+		ret = platform_device_register(devs[i]);
+		if (ret) {
+			while (--i >= 0)
+				platform_device_unregister(devs[i]);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(platform_add_devices);
+
+struct platform_object {
+	struct platform_device pdev;
+	char name[];
+};
+
+/**
+ * platform_device_put - destroy a platform device
+ * @pdev: platform device to free
+ *
+ * Free all memory associated with a platform device.  This function must
+ * _only_ be externally called in error cases.  All other usage is a bug.
+ */
+void platform_device_put(struct platform_device *pdev)
+{
+	if (pdev)
+		put_device(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(platform_device_put);
+
+static void platform_device_release(struct device *dev)
+{
+	struct platform_object *pa = container_of(dev, struct platform_object,
+						  pdev.dev);
+
+	of_device_node_put(&pa->pdev.dev);
+	kfree(pa->pdev.dev.platform_data);
+	kfree(pa->pdev.mfd_cell);
+	kfree(pa->pdev.resource);
+	kfree(pa->pdev.driver_override);
+	kfree(pa);
+}
+
+/**
+ * platform_device_alloc - create a platform device
+ * @name: base name of the device we're adding
+ * @id: instance id
+ *
+ * Create a platform device object which can have other objects attached
+ * to it, and which will have attached objects freed when it is released.
+ */
+struct platform_device *platform_device_alloc(const char *name, int id)
+{
+	struct platform_object *pa;
+
+	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
+	if (pa) {
+		strcpy(pa->name, name);
+		pa->pdev.name = pa->name;
+		pa->pdev.id = id;
+		device_initialize(&pa->pdev.dev);
+		pa->pdev.dev.release = platform_device_release;
+		arch_setup_pdev_archdata(&pa->pdev);
+	}
+
+	return pa ? &pa->pdev : NULL;
+}
+EXPORT_SYMBOL_GPL(platform_device_alloc);
+
+/**
+ * platform_device_add_resources - add resources to a platform device
+ * @pdev: platform device allocated by platform_device_alloc to add resources to
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ *
+ * Add a copy of the resources to the platform device.  The memory
+ * associated with the resources will be freed when the platform device is
+ * released.
+ */
+int platform_device_add_resources(struct platform_device *pdev,
+				  const struct resource *res, unsigned int num)
+{
+	struct resource *r = NULL;
+
+	if (res) {
+		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+		if (!r)
+			return -ENOMEM;
+	}
+
+	kfree(pdev->resource);
+	pdev->resource = r;
+	pdev->num_resources = num;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(platform_device_add_resources);
+
+/**
+ * platform_device_add_data - add platform-specific data to a platform device
+ * @pdev: platform device allocated by platform_device_alloc to add data to
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Add a copy of platform specific data to the platform device's
+ * platform_data pointer.  The memory associated with the platform data
+ * will be freed when the platform device is released.
+ */
+int platform_device_add_data(struct platform_device *pdev, const void *data,
+			     size_t size)
+{
+	void *d = NULL;
+
+	if (data) {
+		d = kmemdup(data, size, GFP_KERNEL);
+		if (!d)
+			return -ENOMEM;
+	}
+
+	kfree(pdev->dev.platform_data);
+	pdev->dev.platform_data = d;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(platform_device_add_data);
+
+/**
+ * platform_device_add_properties - add built-in properties to a platform device
+ * @pdev: platform device to add properties to
+ * @properties: null terminated array of properties to add
+ *
+ * The function will take a deep copy of @properties and attach the copy to the
+ * platform device. The memory associated with properties will be freed when the
+ * platform device is released.
+ */
+int platform_device_add_properties(struct platform_device *pdev,
+				   const struct property_entry *properties)
+{
+	return device_add_properties(&pdev->dev, properties);
+}
+EXPORT_SYMBOL_GPL(platform_device_add_properties);
+
+/**
+ * platform_device_add - add a platform device to device hierarchy
+ * @pdev: platform device we're adding
+ *
+ * This is part 2 of platform_device_register(), though may be called
+ * separately _iff_ pdev was allocated by platform_device_alloc().
+ */
+int platform_device_add(struct platform_device *pdev)
+{
+	int i, ret;
+
+	if (!pdev)
+		return -EINVAL;
+
+	if (!pdev->dev.parent)
+		pdev->dev.parent = &platform_bus;
+
+	pdev->dev.bus = &platform_bus_type;
+
+	switch (pdev->id) {
+	default:
+		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
+		break;
+	case PLATFORM_DEVID_NONE:
+		dev_set_name(&pdev->dev, "%s", pdev->name);
+		break;
+	case PLATFORM_DEVID_AUTO:
+		/*
+		 * Automatically allocated device ID. We mark it as such so
+		 * that we remember it must be freed, and we append a suffix
+		 * to avoid namespace collision with explicit IDs.
+		 */
+		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+		pdev->id = ret;
+		pdev->id_auto = true;
+		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+		break;
+	}
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		struct resource *p, *r = &pdev->resource[i];
+
+		if (r->name == NULL)
+			r->name = dev_name(&pdev->dev);
+
+		p = r->parent;
+		if (!p) {
+			if (resource_type(r) == IORESOURCE_MEM)
+				p = &iomem_resource;
+			else if (resource_type(r) == IORESOURCE_IO)
+				p = &ioport_resource;
+		}
+
+		if (p && insert_resource(p, r)) {
+			dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+			ret = -EBUSY;
+			goto failed;
+		}
+	}
+
+	pr_debug("Registering platform device '%s'. Parent at %s\n",
+		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
+
+	ret = device_add(&pdev->dev);
+	if (ret == 0)
+		return ret;
+
+ failed:
+	if (pdev->id_auto) {
+		ida_simple_remove(&platform_devid_ida, pdev->id);
+		pdev->id = PLATFORM_DEVID_AUTO;
+	}
+
+	while (--i >= 0) {
+		struct resource *r = &pdev->resource[i];
+		if (r->parent)
+			release_resource(r);
+	}
+
+ err_out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(platform_device_add);
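+
+/*
+ * Example (sketch): the usual two-step registration pairs the calls
+ * above with platform_device_put() on failure, which also frees the
+ * copies made by the platform_device_add_*() helpers.
+ *
+ *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
+ *	if (!pdev)
+ *		return -ENOMEM;
+ *	ret = platform_device_add(pdev);
+ *	if (ret) {
+ *		platform_device_put(pdev);
+ *		return ret;
+ *	}
+ */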
+
+/**
+ * platform_device_del - remove a platform-level device
+ * @pdev: platform device we're removing
+ *
+ * Note that this function will also release all memory- and port-based
+ * resources owned by the device (@dev->resource).  This function must
+ * _only_ be externally called in error cases.  All other usage is a bug.
+ */
+void platform_device_del(struct platform_device *pdev)
+{
+	int i;
+
+	if (pdev) {
+		device_remove_properties(&pdev->dev);
+		device_del(&pdev->dev);
+
+		if (pdev->id_auto) {
+			ida_simple_remove(&platform_devid_ida, pdev->id);
+			pdev->id = PLATFORM_DEVID_AUTO;
+		}
+
+		for (i = 0; i < pdev->num_resources; i++) {
+			struct resource *r = &pdev->resource[i];
+			if (r->parent)
+				release_resource(r);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(platform_device_del);
+
+/**
+ * platform_device_register - add a platform-level device
+ * @pdev: platform device we're adding
+ */
+int platform_device_register(struct platform_device *pdev)
+{
+	device_initialize(&pdev->dev);
+	arch_setup_pdev_archdata(pdev);
+	return platform_device_add(pdev);
+}
+EXPORT_SYMBOL_GPL(platform_device_register);
+
+/**
+ * platform_device_unregister - unregister a platform-level device
+ * @pdev: platform device we're unregistering
+ *
+ * Unregistration is done in 2 steps. First we release all resources
+ * and remove the device from the subsystem, then we drop the
+ * reference count by calling platform_device_put().
+ */
+void platform_device_unregister(struct platform_device *pdev)
+{
+	platform_device_del(pdev);
+	platform_device_put(pdev);
+}
+EXPORT_SYMBOL_GPL(platform_device_unregister);
+
+/**
+ * platform_device_register_full - add a platform-level device with
+ * resources and platform-specific data
+ *
+ * @pdevinfo: data used to create device
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+struct platform_device *platform_device_register_full(
+		const struct platform_device_info *pdevinfo)
+{
+	int ret = -ENOMEM;
+	struct platform_device *pdev;
+
+	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
+	if (!pdev)
+		goto err_alloc;
+
+	pdev->dev.parent = pdevinfo->parent;
+	pdev->dev.fwnode = pdevinfo->fwnode;
+
+	if (pdevinfo->dma_mask) {
+		/*
+		 * This memory isn't freed when the device is put;
+		 * there is no nice way to do that.  Conceptually,
+		 * dma_mask in struct device should not be a pointer.
+		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
+		 */
+		pdev->dev.dma_mask =
+			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+		if (!pdev->dev.dma_mask)
+			goto err;
+
+		*pdev->dev.dma_mask = pdevinfo->dma_mask;
+		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
+	}
+
+	ret = platform_device_add_resources(pdev,
+			pdevinfo->res, pdevinfo->num_res);
+	if (ret)
+		goto err;
+
+	ret = platform_device_add_data(pdev,
+			pdevinfo->data, pdevinfo->size_data);
+	if (ret)
+		goto err;
+
+	if (pdevinfo->properties) {
+		ret = platform_device_add_properties(pdev,
+						     pdevinfo->properties);
+		if (ret)
+			goto err;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+err:
+		ACPI_COMPANION_SET(&pdev->dev, NULL);
+		kfree(pdev->dev.dma_mask);
+
+err_alloc:
+		platform_device_put(pdev);
+		return ERR_PTR(ret);
+	}
+
+	return pdev;
+}
+EXPORT_SYMBOL_GPL(platform_device_register_full);
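+
+/*
+ * Example (sketch; the "foo" names are hypothetical): the one-shot
+ * variant packs name, id, resources and platform data into a single
+ * platform_device_info, and can also request an automatic id.
+ *
+ *	struct platform_device_info pdevinfo = {
+ *		.name		= "foo",
+ *		.id		= PLATFORM_DEVID_AUTO,
+ *		.res		= foo_res,
+ *		.num_res	= ARRAY_SIZE(foo_res),
+ *		.data		= &foo_pdata,
+ *		.size_data	= sizeof(foo_pdata),
+ *	};
+ *
+ *	pdev = platform_device_register_full(&pdevinfo);
+ *	if (IS_ERR(pdev))
+ *		return PTR_ERR(pdev);
+ */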
+
+static int platform_drv_probe(struct device *_dev)
+{
+	struct platform_driver *drv = to_platform_driver(_dev->driver);
+	struct platform_device *dev = to_platform_device(_dev);
+	int ret;
+
+	ret = of_clk_set_defaults(_dev->of_node, false);
+	if (ret < 0)
+		return ret;
+
+	ret = dev_pm_domain_attach(_dev, true);
+	if (ret)
+		goto out;
+
+	if (drv->probe) {
+		ret = drv->probe(dev);
+		if (ret)
+			dev_pm_domain_detach(_dev, true);
+	}
+
+out:
+	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+		dev_warn(_dev, "probe deferral not supported\n");
+		ret = -ENXIO;
+	}
+
+	return ret;
+}
+
+static int platform_drv_probe_fail(struct device *_dev)
+{
+	return -ENXIO;
+}
+
+static int platform_drv_remove(struct device *_dev)
+{
+	struct platform_driver *drv = to_platform_driver(_dev->driver);
+	struct platform_device *dev = to_platform_device(_dev);
+	int ret = 0;
+
+	if (drv->remove)
+		ret = drv->remove(dev);
+	dev_pm_domain_detach(_dev, true);
+
+	return ret;
+}
+
+static void platform_drv_shutdown(struct device *_dev)
+{
+	struct platform_driver *drv = to_platform_driver(_dev->driver);
+	struct platform_device *dev = to_platform_device(_dev);
+
+	if (drv->shutdown)
+		drv->shutdown(dev);
+}
+
+/**
+ * __platform_driver_register - register a driver for platform-level devices
+ * @drv: platform driver structure
+ * @owner: owning module/driver
+ */
+int __platform_driver_register(struct platform_driver *drv,
+				struct module *owner)
+{
+	drv->driver.owner = owner;
+	drv->driver.bus = &platform_bus_type;
+	drv->driver.probe = platform_drv_probe;
+	drv->driver.remove = platform_drv_remove;
+	drv->driver.shutdown = platform_drv_shutdown;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__platform_driver_register);
+
+/**
+ * platform_driver_unregister - unregister a driver for platform-level devices
+ * @drv: platform driver structure
+ */
+void platform_driver_unregister(struct platform_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(platform_driver_unregister);
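+
+/*
+ * Example (sketch): most drivers do not call the register/unregister
+ * pair directly but use the module_platform_driver() helper, which
+ * expands to module_init()/module_exit() doing exactly that.
+ *
+ *	static struct platform_driver foo_driver = {
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *		.driver	= {
+ *			.name = "foo",
+ *		},
+ *	};
+ *	module_platform_driver(foo_driver);
+ */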
+
+/**
+ * __platform_driver_probe - register driver for non-hotpluggable device
+ * @drv: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ * @module: module which will be the owner of the driver
+ *
+ * Use this instead of platform_driver_register() when you know the device
+ * is not hotpluggable and has already been registered, and you want to
+ * remove its run-once probe() infrastructure from memory after the driver
+ * has bound to the device.
+ *
+ * One typical use for this would be with drivers for controllers integrated
+ * into system-on-chip processors, where the controller devices have been
+ * configured as part of board setup.
+ *
+ * Note that this is incompatible with deferred probing.
+ *
+ * Returns zero if the driver registered and bound to a device, else returns
+ * a negative error code and with the driver not registered.
+ */
+int __init_or_module __platform_driver_probe(struct platform_driver *drv,
+		int (*probe)(struct platform_device *), struct module *module)
+{
+	int retval, code;
+
+	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
+		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
+			 drv->driver.name, __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * We have to run our probes synchronously because we check if
+	 * we find any devices to bind to and exit with error if there
+	 * are any.
+	 */
+	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
+	/*
+	 * Prevent driver from requesting probe deferral to avoid further
+	 * futile probe attempts.
+	 */
+	drv->prevent_deferred_probe = true;
+
+	/* make sure driver won't have bind/unbind attributes */
+	drv->driver.suppress_bind_attrs = true;
+
+	/* temporary section violation during probe() */
+	drv->probe = probe;
+	retval = code = __platform_driver_register(drv, module);
+
+	/*
+	 * Fixup that section violation, being paranoid about code scanning
+	 * the list of drivers in order to probe new devices.  Check to see
+	 * if the probe was successful, and make sure any forced probes of
+	 * new devices fail.
+	 */
+	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
+	drv->probe = NULL;
+	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
+		retval = -ENODEV;
+	drv->driver.probe = platform_drv_probe_fail;
+	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
+
+	if (code != retval)
+		platform_driver_unregister(drv);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__platform_driver_probe);
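+
+/*
+ * Example (sketch): callers normally use the platform_driver_probe()
+ * or module_platform_driver_probe() wrappers, which supply THIS_MODULE;
+ * the probe routine can then live in __init memory and be discarded
+ * after boot.
+ *
+ *	static int __init foo_probe(struct platform_device *pdev)
+ *	{
+ *		...
+ *	}
+ *
+ *	static struct platform_driver foo_driver = {
+ *		.driver = { .name = "foo" },
+ *	};
+ *	module_platform_driver_probe(foo_driver, foo_probe);
+ */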
+
+/**
+ * __platform_create_bundle - register driver and create corresponding device
+ * @driver: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ * @res: set of resources that needs to be allocated for the device
+ * @n_res: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ * @module: module which will be the owner of the driver
+ *
+ * Use this in legacy-style modules that probe hardware directly and
+ * register a single platform device and corresponding platform driver.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+struct platform_device * __init_or_module __platform_create_bundle(
+			struct platform_driver *driver,
+			int (*probe)(struct platform_device *),
+			struct resource *res, unsigned int n_res,
+			const void *data, size_t size, struct module *module)
+{
+	struct platform_device *pdev;
+	int error;
+
+	pdev = platform_device_alloc(driver->driver.name, -1);
+	if (!pdev) {
+		error = -ENOMEM;
+		goto err_out;
+	}
+
+	error = platform_device_add_resources(pdev, res, n_res);
+	if (error)
+		goto err_pdev_put;
+
+	error = platform_device_add_data(pdev, data, size);
+	if (error)
+		goto err_pdev_put;
+
+	error = platform_device_add(pdev);
+	if (error)
+		goto err_pdev_put;
+
+	error = __platform_driver_probe(driver, probe, module);
+	if (error)
+		goto err_pdev_del;
+
+	return pdev;
+
+err_pdev_del:
+	platform_device_del(pdev);
+err_pdev_put:
+	platform_device_put(pdev);
+err_out:
+	return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(__platform_create_bundle);
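+
+/*
+ * Example (sketch): legacy code probing hardware directly can get a
+ * device/driver pair in one call via the platform_create_bundle()
+ * wrapper, which supplies THIS_MODULE.
+ *
+ *	pdev = platform_create_bundle(&foo_driver, foo_probe,
+ *				      foo_res, ARRAY_SIZE(foo_res),
+ *				      NULL, 0);
+ *	if (IS_ERR(pdev))
+ *		return PTR_ERR(pdev);
+ */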
+
+/**
+ * __platform_register_drivers - register an array of platform drivers
+ * @drivers: an array of drivers to register
+ * @count: the number of drivers to register
+ * @owner: module owning the drivers
+ *
+ * Registers platform drivers specified by an array. On failure to register a
+ * driver, all previously registered drivers will be unregistered. Callers of
+ * this API should use platform_unregister_drivers() to unregister drivers in
+ * the reverse order.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __platform_register_drivers(struct platform_driver * const *drivers,
+				unsigned int count, struct module *owner)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < count; i++) {
+		pr_debug("registering platform driver %ps\n", drivers[i]);
+
+		err = __platform_driver_register(drivers[i], owner);
+		if (err < 0) {
+			pr_err("failed to register platform driver %ps: %d\n",
+			       drivers[i], err);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	while (i--) {
+		pr_debug("unregistering platform driver %ps\n", drivers[i]);
+		platform_driver_unregister(drivers[i]);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(__platform_register_drivers);
+
+/**
+ * platform_unregister_drivers - unregister an array of platform drivers
+ * @drivers: an array of drivers to unregister
+ * @count: the number of drivers to unregister
+ *
+ * Unregisters platform drivers specified by an array. This is typically used
+ * to complement an earlier call to platform_register_drivers(). Drivers are
+ * unregistered in the reverse order in which they were registered.
+ */
+void platform_unregister_drivers(struct platform_driver * const *drivers,
+				 unsigned int count)
+{
+	while (count--) {
+		pr_debug("unregistering platform driver %ps\n", drivers[count]);
+		platform_driver_unregister(drivers[count]);
+	}
+}
+EXPORT_SYMBOL_GPL(platform_unregister_drivers);
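+
+/*
+ * Example (sketch): a module composed of several sub-drivers can
+ * register them as a group through the platform_register_drivers()
+ * wrapper, which supplies THIS_MODULE.
+ *
+ *	static struct platform_driver * const foo_drivers[] = {
+ *		&foo_core_driver,
+ *		&foo_spi_driver,
+ *	};
+ *
+ *	return platform_register_drivers(foo_drivers,
+ *					 ARRAY_SIZE(foo_drivers));
+ */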
+
+/* modalias support enables more hands-off userspace setup:
+ * (a) environment variable lets new-style hotplug events work once system is
+ *     fully running:  "modprobe $MODALIAS"
+ * (b) sysfs attribute lets new-style coldplug recover from hotplug events
+ *     mishandled before system is fully running:  "modprobe $(cat modalias)"
+ */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct platform_device	*pdev = to_platform_device(dev);
+	int len;
+
+	len = of_device_modalias(dev, buf, PAGE_SIZE);
+	if (len != -ENODEV)
+		return len;
+
+	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+	if (len != -ENODEV)
+		return len;
+
+	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	char *driver_override, *old, *cp;
+
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	driver_override = kstrndup(buf, count, GFP_KERNEL);
+	if (!driver_override)
+		return -ENOMEM;
+
+	cp = strchr(driver_override, '\n');
+	if (cp)
+		*cp = '\0';
+
+	device_lock(dev);
+	old = pdev->driver_override;
+	if (strlen(driver_override)) {
+		pdev->driver_override = driver_override;
+	} else {
+		kfree(driver_override);
+		pdev->driver_override = NULL;
+	}
+	device_unlock(dev);
+
+	kfree(old);
+
+	return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	ssize_t len;
+
+	device_lock(dev);
+	len = sprintf(buf, "%s\n", pdev->driver_override);
+	device_unlock(dev);
+	return len;
+}
+static DEVICE_ATTR_RW(driver_override);
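+
+/*
+ * Example (illustrative; the device name is hypothetical): from user
+ * space, driver_override forces a device to bind to a specific driver,
+ * e.g. for device assignment with vfio-platform:
+ *
+ *	# echo vfio-platform > \
+ *	    /sys/bus/platform/devices/fff51000.ethernet/driver_override
+ *	# echo fff51000.ethernet > /sys/bus/platform/drivers_probe
+ */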
+
+
+static struct attribute *platform_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	&dev_attr_driver_override.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(platform_dev);
+
+static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct platform_device	*pdev = to_platform_device(dev);
+	int rc;
+
+	/* Some devices have extra OF data and an OF-style MODALIAS */
+	rc = of_device_uevent_modalias(dev, env);
+	if (rc != -ENODEV)
+		return rc;
+
+	rc = acpi_device_uevent_modalias(dev, env);
+	if (rc != -ENODEV)
+		return rc;
+
+	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
+			pdev->name);
+	return 0;
+}
+
+static const struct platform_device_id *platform_match_id(
+			const struct platform_device_id *id,
+			struct platform_device *pdev)
+{
+	while (id->name[0]) {
+		if (strcmp(pdev->name, id->name) == 0) {
+			pdev->id_entry = id;
+			return id;
+		}
+		id++;
+	}
+	return NULL;
+}
+
+/**
+ * platform_match - bind platform device to platform driver.
+ * @dev: device.
+ * @drv: driver.
+ *
+ * Platform device IDs are assumed to be encoded like this:
+ * "<name><instance>", where <name> is a short description of the type of
+ * device, like "pci" or "floppy", and <instance> is the enumerated
+ * instance of the device, like '0' or '42'.  Driver IDs are simply
+ * "<name>".  So, extract the <name> from the platform_device structure,
+ * and compare it against the name of the driver. Return whether they match
+ * or not.
+ */
+static int platform_match(struct device *dev, struct device_driver *drv)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct platform_driver *pdrv = to_platform_driver(drv);
+
+	/* When driver_override is set, only bind to the matching driver */
+	if (pdev->driver_override)
+		return !strcmp(pdev->driver_override, drv->name);
+
+	/* Attempt an OF style match first */
+	if (of_driver_match_device(dev, drv))
+		return 1;
+
+	/* Then try ACPI style match */
+	if (acpi_driver_match_device(dev, drv))
+		return 1;
+
+	/* Then try to match against the id table */
+	if (pdrv->id_table)
+		return platform_match_id(pdrv->id_table, pdev) != NULL;
+
+	/* fall-back to driver name match */
+	return (strcmp(pdev->name, drv->name) == 0);
+}
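+
+/*
+ * Example (sketch): drivers supporting several device names express
+ * that through an id table; the driver_data field is free for driver
+ * use, e.g. to point at per-variant data.
+ *
+ *	static const struct platform_device_id foo_ids[] = {
+ *		{ "foo-v1", (kernel_ulong_t)&foo_v1_info },
+ *		{ "foo-v2", (kernel_ulong_t)&foo_v2_info },
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(platform, foo_ids);
+ */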
+
+#ifdef CONFIG_PM_SLEEP
+
+static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct platform_driver *pdrv = to_platform_driver(dev->driver);
+	struct platform_device *pdev = to_platform_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->suspend)
+		ret = pdrv->suspend(pdev, mesg);
+
+	return ret;
+}
+
+static int platform_legacy_resume(struct device *dev)
+{
+	struct platform_driver *pdrv = to_platform_driver(dev->driver);
+	struct platform_device *pdev = to_platform_device(dev);
+	int ret = 0;
+
+	if (dev->driver && pdrv->resume)
+		ret = pdrv->resume(pdev);
+
+	return ret;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int platform_pm_suspend(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend)
+			ret = drv->pm->suspend(dev);
+	} else {
+		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
+	}
+
+	return ret;
+}
+
+int platform_pm_resume(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume)
+			ret = drv->pm->resume(dev);
+	} else {
+		ret = platform_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+int platform_pm_freeze(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze)
+			ret = drv->pm->freeze(dev);
+	} else {
+		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
+	}
+
+	return ret;
+}
+
+int platform_pm_thaw(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw)
+			ret = drv->pm->thaw(dev);
+	} else {
+		ret = platform_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+int platform_pm_poweroff(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff)
+			ret = drv->pm->poweroff(dev);
+	} else {
+		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
+	}
+
+	return ret;
+}
+
+int platform_pm_restore(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore)
+			ret = drv->pm->restore(dev);
+	} else {
+		ret = platform_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
+int platform_dma_configure(struct device *dev)
+{
+	enum dev_dma_attr attr;
+	int ret = 0;
+
+	if (dev->of_node) {
+		ret = of_dma_configure(dev, dev->of_node, true);
+	} else if (has_acpi_companion(dev)) {
+		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+		if (attr != DEV_DMA_NOT_SUPPORTED)
+			ret = acpi_dma_configure(dev, attr);
+	}
+
+	return ret;
+}
+
+static const struct dev_pm_ops platform_dev_pm_ops = {
+	.runtime_suspend = pm_generic_runtime_suspend,
+	.runtime_resume = pm_generic_runtime_resume,
+	USE_PLATFORM_PM_SLEEP_OPS
+};
+
+struct bus_type platform_bus_type = {
+	.name		= "platform",
+	.dev_groups	= platform_dev_groups,
+	.match		= platform_match,
+	.uevent		= platform_uevent,
+	.dma_configure	= platform_dma_configure,
+	.pm		= &platform_dev_pm_ops,
+};
+EXPORT_SYMBOL_GPL(platform_bus_type);
+
+int __init platform_bus_init(void)
+{
+	int error;
+
+	early_platform_cleanup();
+
+	error = device_register(&platform_bus);
+	if (error) {
+		put_device(&platform_bus);
+		return error;
+	}
+	error = bus_register(&platform_bus_type);
+	if (error)
+		device_unregister(&platform_bus);
+	of_platform_register_reconfig_notifier();
+	return error;
+}
+
+#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
+u64 dma_get_required_mask(struct device *dev)
+{
+	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+	u64 mask;
+
+	if (!high_totalram) {
+		/* convert to mask just covering totalram */
+		low_totalram = (1 << (fls(low_totalram) - 1));
+		low_totalram += low_totalram - 1;
+		mask = low_totalram;
+	} else {
+		high_totalram = (1 << (fls(high_totalram) - 1));
+		high_totalram += high_totalram - 1;
+		mask = (((u64)high_totalram) << 32) + 0xffffffff;
+	}
+	return mask;
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+#endif
+
+static __initdata LIST_HEAD(early_platform_driver_list);
+static __initdata LIST_HEAD(early_platform_device_list);
+
+/**
+ * early_platform_driver_register - register early platform driver
+ * @epdrv: early_platform driver structure
+ * @buf: string passed from early_param()
+ *
+ * Helper function for early_platform_init() / early_platform_init_buffer()
+ */
+int __init early_platform_driver_register(struct early_platform_driver *epdrv,
+					  char *buf)
+{
+	char *tmp;
+	int n;
+
+	/* Simply add the driver to the end of the global list.
+	 * Drivers will by default be put on the list in compiled-in order.
+	 */
+	if (!epdrv->list.next) {
+		INIT_LIST_HEAD(&epdrv->list);
+		list_add_tail(&epdrv->list, &early_platform_driver_list);
+	}
+
+	/* If the user has specified a device then make sure the driver
+	 * gets prioritized. The driver of the last device specified on
+	 * the command line will be put first on the list.
+	 */
+	n = strlen(epdrv->pdrv->driver.name);
+	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
+		list_move(&epdrv->list, &early_platform_driver_list);
+
+		/* Allow passing parameters after device name */
+		if (buf[n] == '\0' || buf[n] == ',')
+			epdrv->requested_id = -1;
+		else {
+			epdrv->requested_id = simple_strtoul(&buf[n + 1],
+							     &tmp, 10);
+
+			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
+				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+				n = 0;
+			} else
+				n += strcspn(&buf[n + 1], ",") + 1;
+		}
+
+		if (buf[n] == ',')
+			n++;
+
+		if (epdrv->bufsize) {
+			memcpy(epdrv->buffer, &buf[n],
+			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
+			epdrv->buffer[epdrv->bufsize - 1] = '\0';
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * early_platform_add_devices - adds a number of early platform devices
+ * @devs: array of early platform devices to add
+ * @num: number of early platform devices in array
+ *
+ * Used by early architecture code to register early platform devices and
+ * their platform data.
+ */
+void __init early_platform_add_devices(struct platform_device **devs, int num)
+{
+	struct device *dev;
+	int i;
+
+	/* simply add the devices to list */
+	for (i = 0; i < num; i++) {
+		dev = &devs[i]->dev;
+
+		if (!dev->devres_head.next) {
+			pm_runtime_early_init(dev);
+			INIT_LIST_HEAD(&dev->devres_head);
+			list_add_tail(&dev->devres_head,
+				      &early_platform_device_list);
+		}
+	}
+}
+
+/**
+ * early_platform_driver_register_all - register early platform drivers
+ * @class_str: string to identify early platform driver class
+ *
+ * Used by architecture code to register all early platform drivers
+ * for a certain class. If this call is omitted, then only early platform
+ * drivers with matching kernel command line class parameters will be
+ * registered.
+ */
+void __init early_platform_driver_register_all(char *class_str)
+{
+	/* The "class_str" parameter may or may not be present on the kernel
+	 * command line. If it is present then there may be more than one
+	 * matching parameter.
+	 *
+	 * Since we register our early platform drivers using early_param()
+	 * we need to make sure that they also get registered in the case
+	 * when the parameter is missing from the kernel command line.
+	 *
+	 * We use parse_early_options() to make sure the early_param() gets
+	 * called at least once. The early_param() may be called more than
+	 * once since the name of the preferred device may be specified on
+	 * the kernel command line. early_platform_driver_register() handles
+	 * this case for us.
+	 */
+	parse_early_options(class_str);
+}
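+
+/*
+ * Example (sketch; "earlytimer" is the class string used by some
+ * architectures): an early driver is declared with early_platform_init()
+ * and the whole class is then probed from architecture code.
+ *
+ *	early_platform_init("earlytimer", &foo_timer_driver);
+ *	...
+ *	early_platform_driver_register_all("earlytimer");
+ *	early_platform_driver_probe("earlytimer", 1, 0);
+ */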
+
+/**
+ * early_platform_match - find early platform device matching driver
+ * @epdrv: early platform driver structure
+ * @id: id to match against
+ */
+static struct platform_device * __init
+early_platform_match(struct early_platform_driver *epdrv, int id)
+{
+	struct platform_device *pd;
+
+	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
+		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
+			if (pd->id == id)
+				return pd;
+
+	return NULL;
+}
+
+/**
+ * early_platform_left - check if early platform driver has matching devices
+ * @epdrv: early platform driver structure
+ * @id: return 1 if a matching device with this id or above exists
+ */
+static int __init early_platform_left(struct early_platform_driver *epdrv,
+				       int id)
+{
+	struct platform_device *pd;
+
+	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
+		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
+			if (pd->id >= id)
+				return 1;
+
+	return 0;
+}
+
+/**
+ * early_platform_driver_probe_id - probe drivers matching class_str and id
+ * @class_str: string to identify early platform driver class
+ * @id: id to match against
+ * @nr_probe: number of platform devices to successfully probe before exiting
+ */
+static int __init early_platform_driver_probe_id(char *class_str,
+						 int id,
+						 int nr_probe)
+{
+	struct early_platform_driver *epdrv;
+	struct platform_device *match;
+	int match_id;
+	int n = 0;
+	int left = 0;
+
+	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
+		/* only use drivers matching our class_str */
+		if (strcmp(class_str, epdrv->class_str))
+			continue;
+
+		if (id == -2) {
+			match_id = epdrv->requested_id;
+			left = 1;
+
+		} else {
+			match_id = id;
+			left += early_platform_left(epdrv, id);
+
+			/* skip requested id */
+			switch (epdrv->requested_id) {
+			case EARLY_PLATFORM_ID_ERROR:
+			case EARLY_PLATFORM_ID_UNSET:
+				break;
+			default:
+				if (epdrv->requested_id == id)
+					match_id = EARLY_PLATFORM_ID_UNSET;
+			}
+		}
+
+		switch (match_id) {
+		case EARLY_PLATFORM_ID_ERROR:
+			pr_warn("%s: unable to parse %s parameter\n",
+				class_str, epdrv->pdrv->driver.name);
+			/* fall-through */
+		case EARLY_PLATFORM_ID_UNSET:
+			match = NULL;
+			break;
+		default:
+			match = early_platform_match(epdrv, match_id);
+		}
+
+		if (match) {
+			/*
+			 * Set up a sensible init_name to enable
+			 * dev_name() and others to be used before the
+			 * rest of the driver core is initialized.
+			 */
+			if (!match->dev.init_name && slab_is_available()) {
+				if (match->id != -1)
+					match->dev.init_name =
+						kasprintf(GFP_KERNEL, "%s.%d",
+							  match->name,
+							  match->id);
+				else
+					match->dev.init_name =
+						kasprintf(GFP_KERNEL, "%s",
+							  match->name);
+
+				if (!match->dev.init_name)
+					return -ENOMEM;
+			}
+
+			if (epdrv->pdrv->probe(match))
+				pr_warn("%s: unable to probe %s early.\n",
+					class_str, match->name);
+			else
+				n++;
+		}
+
+		if (n >= nr_probe)
+			break;
+	}
+
+	if (left)
+		return n;
+	else
+		return -ENODEV;
+}
+
+/**
+ * early_platform_driver_probe - probe a class of registered drivers
+ * @class_str: string to identify early platform driver class
+ * @nr_probe: number of platform devices to successfully probe before exiting
+ * @user_only: only probe user specified early platform devices
+ *
+ * Used by architecture code to probe registered early platform drivers
+ * within a certain class. For probe to happen a registered early platform
+ * device matching a registered early platform driver is needed.
+ */
+int __init early_platform_driver_probe(char *class_str,
+				       int nr_probe,
+				       int user_only)
+{
+	int k, n, i;
+
+	n = 0;
+	for (i = -2; n < nr_probe; i++) {
+		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
+
+		if (k < 0)
+			break;
+
+		n += k;
+
+		if (user_only)
+			break;
+	}
+
+	return n;
+}
+
+/**
+ * early_platform_cleanup - clean up early platform code
+ */
+void __init early_platform_cleanup(void)
+{
+	struct platform_device *pd, *pd2;
+
+	/* clean up the devres list used to chain devices */
+	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
+				 dev.devres_head) {
+		list_del(&pd->dev.devres_head);
+		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
+	}
+}
+
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
new file mode 100644
index 0000000..e1bb691
--- /dev/null
+++ b/drivers/base/power/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
+obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
+obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o domain_governor.o
+obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
+
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 0000000..5a42ae4
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,645 @@
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_CLK
+
+enum pce_status {
+	PCE_STATUS_NONE = 0,
+	PCE_STATUS_ACQUIRED,
+	PCE_STATUS_ENABLED,
+	PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+	struct list_head node;
+	char *con_id;
+	struct clk *clk;
+	enum pce_status status;
+};
+
+/**
+ * __pm_clk_enable - Enable a clock, reporting any errors.
+ * @dev: The device for the given clock.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+{
+	int ret;
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		ret = clk_enable(ce->clk);
+		if (!ret)
+			ce->status = PCE_STATUS_ENABLED;
+		else
+			dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+				__func__, ce->clk, ret);
+	}
+}
+
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+	if (!ce->clk)
+		ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		clk_prepare(ce->clk);
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+			ce->clk, ce->con_id);
+	}
+}
+
+static int __pm_clk_add(struct device *dev, const char *con_id,
+			struct clk *clk)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!psd)
+		return -EINVAL;
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce)
+		return -ENOMEM;
+
+	if (con_id) {
+		ce->con_id = kstrdup(con_id, GFP_KERNEL);
+		if (!ce->con_id) {
+			dev_err(dev,
+				"Not enough memory for clock connection ID.\n");
+			kfree(ce);
+			return -ENOMEM;
+		}
+	} else {
+		if (IS_ERR(clk)) {
+			kfree(ce);
+			return -ENOENT;
+		}
+		ce->clk = clk;
+	}
+
+	pm_clk_acquire(dev, ce);
+
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
+	return 0;
+}
+
+/**
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the power management of @dev.
+ */
+int pm_clk_add(struct device *dev, const char *con_id)
+{
+	return __pm_clk_add(dev, con_id, NULL);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add);
+
+/**
+ * pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @clk: Clock pointer
+ *
+ * Add the clock to the list of clocks used for the power management of @dev.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function has
+ * successfully returned.
+ */
+int pm_clk_add_clk(struct device *dev, struct clk *clk)
+{
+	return __pm_clk_add(dev, NULL, clk);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
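+
+/*
+ * Example (sketch; the "fck" connection ID is hypothetical): a
+ * subsystem or PM domain typically pairs pm_clk_create() with
+ * pm_clk_add() at attach time and undoes both on failure.
+ *
+ *	ret = pm_clk_create(dev);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = pm_clk_add(dev, "fck");
+ *	if (ret) {
+ *		pm_clk_destroy(dev);
+ *		return ret;
+ *	}
+ */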
+
+
+/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * the 'name' provided to the list of clocks used for the power
+ * management of @dev. On success, returns 0. Returns a negative error
+ * code if the clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+	struct clk *clk;
+	int ret;
+
+	if (!dev || !dev->of_node || !name)
+		return -EINVAL;
+
+	clk = of_clk_get_by_name(dev->of_node, name);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = pm_clk_add_clk(dev, clk);
+	if (ret) {
+		clk_put(clk);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) is going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+	struct clk **clks;
+	int i, count;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	count = of_count_phandle_with_args(dev->of_node, "clocks",
+					   "#clock-cells");
+	if (count <= 0)
+		return -ENODEV;
+
+	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		clks[i] = of_clk_get(dev->of_node, i);
+		if (IS_ERR(clks[i])) {
+			ret = PTR_ERR(clks[i]);
+			goto error;
+		}
+
+		ret = pm_clk_add_clk(dev, clks[i]);
+		if (ret) {
+			clk_put(clks[i]);
+			goto error;
+		}
+	}
+
+	kfree(clks);
+
+	return i;
+
+error:
+	while (i--)
+		pm_clk_remove_clk(dev, clks[i]);
+
+	kfree(clks);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
+
+/**
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
+ */
+static void __pm_clk_remove(struct pm_clock_entry *ce)
+{
+	if (!ce)
+		return;
+
+	if (ce->status < PCE_STATUS_ERROR) {
+		if (ce->status == PCE_STATUS_ENABLED)
+			clk_disable(ce->clk);
+
+		if (ce->status >= PCE_STATUS_ACQUIRED) {
+			clk_unprepare(ce->clk);
+			clk_put(ce->clk);
+		}
+	}
+
+	kfree(ce->con_id);
+	kfree(ce);
+}
+
+/**
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove(struct device *dev, const char *con_id)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!psd)
+		return;
+
+	spin_lock_irq(&psd->lock);
+
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (!con_id && !ce->con_id)
+			goto remove;
+		else if (!con_id || !ce->con_id)
+			continue;
+		else if (!strcmp(con_id, ce->con_id))
+			goto remove;
+	}
+
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove);
+
+/**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!psd || !clk)
+		return;
+
+	spin_lock_irq(&psd->lock);
+
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (clk == ce->clk)
+			goto remove;
+	}
+
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
+
+/**
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
+ *
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
+ */
+void pm_clk_init(struct device *dev)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
+EXPORT_SYMBOL_GPL(pm_clk_init);
+
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	return dev_pm_get_subsys_data(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_create);
+
+/**
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_subsys_data object it pointed to before, and free
+ * that object.
+ */
+void pm_clk_destroy(struct device *dev)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce, *c;
+	struct list_head list;
+
+	if (!psd)
+		return;
+
+	INIT_LIST_HEAD(&list);
+
+	spin_lock_irq(&psd->lock);
+
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+		list_move(&ce->node, &list);
+
+	spin_unlock_irq(&psd->lock);
+
+	dev_pm_put_subsys_data(dev);
+
+	list_for_each_entry_safe_reverse(ce, c, &list, node) {
+		list_del(&ce->node);
+		__pm_clk_remove(ce);
+	}
+}
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+	unsigned long flags;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!psd)
+		return 0;
+
+	spin_lock_irqsave(&psd->lock, flags);
+
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+		if (ce->status < PCE_STATUS_ERROR) {
+			if (ce->status == PCE_STATUS_ENABLED)
+				clk_disable(ce->clk);
+			ce->status = PCE_STATUS_ACQUIRED;
+		}
+	}
+
+	spin_unlock_irqrestore(&psd->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+	unsigned long flags;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!psd)
+		return 0;
+
+	spin_lock_irqsave(&psd->lock, flags);
+
+	list_for_each_entry(ce, &psd->clock_list, node)
+		__pm_clk_enable(dev, ce);
+
+	spin_unlock_irqrestore(&psd->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_resume);
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
+ *
+ * If the device's pm_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+	int error;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->pm_domain)
+			break;
+
+		error = pm_clk_create(dev);
+		if (error)
+			break;
+
+		dev_pm_domain_set(dev, clknb->pm_domain);
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				pm_clk_add(dev, *con_id);
+		} else {
+			pm_clk_add(dev, NULL);
+		}
+
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->pm_domain != clknb->pm_domain)
+			break;
+
+		dev_pm_domain_set(dev, NULL);
+		pm_clk_destroy(dev);
+		break;
+	}
+
+	return 0;
+}
+
+int pm_clk_runtime_suspend(struct device *dev)
+{
+	int ret;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	ret = pm_generic_runtime_suspend(dev);
+	if (ret) {
+		dev_err(dev, "failed to suspend device\n");
+		return ret;
+	}
+
+	ret = pm_clk_suspend(dev);
+	if (ret) {
+		dev_err(dev, "failed to suspend clock\n");
+		pm_generic_runtime_resume(dev);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+	int ret;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	ret = pm_clk_resume(dev);
+	if (ret) {
+		dev_err(dev, "failed to resume clock\n");
+		return ret;
+	}
+
+	return pm_generic_runtime_resume(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
+
+#else /* !CONFIG_PM_CLK */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_prepare_enable(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+	}
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+	struct clk *clk;
+
+	clk = clk_get(dev, con_id);
+	if (!IS_ERR(clk)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+	}
+}
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pm_clk_notifier_block *clknb;
+	struct device *dev = data;
+	char **con_id;
+
+	dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+	clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+	switch (action) {
+	case BUS_NOTIFY_BIND_DRIVER:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				enable_clock(dev, *con_id);
+		} else {
+			enable_clock(dev, NULL);
+		}
+		break;
+	case BUS_NOTIFY_DRIVER_NOT_BOUND:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		if (clknb->con_ids[0]) {
+			for (con_id = clknb->con_ids; *con_id; con_id++)
+				disable_clock(dev, *con_id);
+		} else {
+			disable_clock(dev, NULL);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* !CONFIG_PM_CLK */
+
+/**
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_clk_notify().  However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_clk_add_notifier(struct bus_type *bus,
+				 struct pm_clk_notifier_block *clknb)
+{
+	if (!bus || !clknb)
+		return;
+
+	clknb->nb.notifier_call = pm_clk_notify;
+	bus_register_notifier(bus, &clknb->nb);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
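+
+/*
+ * Example (sketch; the domain and connection IDs are hypothetical):
+ * platform setup code can hook clock handling to every device on a
+ * bus type with a statically defined notifier block.
+ *
+ *	static struct pm_clk_notifier_block foo_clk_notifier = {
+ *		.pm_domain = &foo_pm_domain,
+ *		.con_ids = { "fck", "ick", NULL },
+ *	};
+ *
+ *	pm_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
+ */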
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 0000000..b413951
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,212 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+#include <linux/acpi.h>
+#include <linux/pm_domain.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 0 if a new object has been created or the
+ * refcount was increased, otherwise a negative error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() safely ignores a NULL argument. */
+	kfree(psd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.
+ */
+void dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd)
+		goto out;
+
+	if (--psd->refcount == 0)
+		dev->power.subsys_data = NULL;
+	else
+		psd = NULL;
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+	kfree(psd);
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+
+/**
+ * dev_pm_domain_attach - Attach a device to its PM domain.
+ * @dev: Device to attach.
+ * @power_on: Used to indicate whether we should power on the device.
+ *
+ * The @dev may only be attached to a single PM domain. By iterating through
+ * the available alternatives we try to find a valid PM domain for the device.
+ * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain
+ * should be assigned by the corresponding attach function.
+ *
+ * This function should typically be invoked from subsystem level code during
+ * the probe phase, especially for subsystems that hold devices requiring
+ * power management through PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns 0 when a PM domain has been successfully attached, or when it is
+ * found that the device doesn't need a PM domain, else a negative error code.
+ */
+int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+	int ret;
+
+	if (dev->pm_domain)
+		return 0;
+
+	ret = acpi_dev_pm_attach(dev, power_on);
+	if (!ret)
+		ret = genpd_dev_pm_attach(dev);
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
+
+/**
+ * dev_pm_domain_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * As @dev may only be attached to a single PM domain, the backend PM domain
+ * provider creates a virtual device to attach instead. If attachment succeeds,
+ * the ->detach() callback in the struct dev_pm_domain is assigned by the
+ * corresponding backend attach function, so as to deal with detaching the
+ * created virtual device.
+ *
+ * This function should typically be invoked by a driver during the probe
+ * phase, in case its device requires power management through multiple PM
+ * domains. The driver may benefit from using the returned virtual device to
+ * configure device links towards its original device. Depending on the
+ * use-case and if needed, the links may be dynamically changed by the driver,
+ * which allows it to control the power to the PM domains independently of
+ * each other.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the created virtual device when it has successfully been attached
+ * to its PM domain, NULL in case @dev doesn't need a PM domain, else an
+ * ERR_PTR().
+ * Note that, to detach the returned virtual device, the driver shall call
+ * dev_pm_domain_detach() on it, typically during the remove phase.
+ */
+struct device *dev_pm_domain_attach_by_id(struct device *dev,
+					  unsigned int index)
+{
+	if (dev->pm_domain)
+		return ERR_PTR(-EEXIST);
+
+	return genpd_dev_pm_attach_by_id(dev, index);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
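+
+/*
+ * Example (sketch): a driver whose device sits in two PM domains can
+ * attach a virtual device for the extra domain and tie its runtime PM
+ * to the original device with a device link.
+ *
+ *	genpd_dev = dev_pm_domain_attach_by_id(dev, 1);
+ *	if (IS_ERR(genpd_dev))
+ *		return PTR_ERR(genpd_dev);
+ *
+ *	link = device_link_add(dev, genpd_dev,
+ *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ *	if (!link) {
+ *		dev_pm_domain_detach(genpd_dev, true);
+ *		return -ENODEV;
+ *	}
+ */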
+
+/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+					    char *name)
+{
+	if (dev->pm_domain)
+		return ERR_PTR(-EEXIST);
+
+	return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
+
+/**
+ * dev_pm_domain_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Used to indicate whether we should power off the device.
+ *
+ * This function reverses the actions of dev_pm_domain_attach() and
+ * dev_pm_domain_attach_by_id(), thus detaching @dev from its PM domain.
+ * Typically it should be invoked during the remove phase, either from
+ * subsystem level code or from drivers.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach(struct device *dev, bool power_off)
+{
+	if (dev->pm_domain && dev->pm_domain->detach)
+		dev->pm_domain->detach(dev, power_off);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
+
+/**
+ * dev_pm_domain_set - Set PM domain of a device.
+ * @dev: Device whose PM domain is to be set.
+ * @pd: PM domain to be set, or NULL.
+ *
+ * Sets the PM domain the device belongs to. The PM domain of a device needs
+ * to be set before its probe finishes (i.e. before it is bound to a driver).
+ *
+ * This function must be called with the device lock held.
+ */
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
+{
+	if (dev->pm_domain == pd)
+		return;
+
+	WARN(pd && device_is_bound(dev),
+	     "PM domains can only be changed for unbound devices\n");
+	dev->pm_domain = pd;
+	device_pm_check_callbacks(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 0000000..4b57141
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,2923 @@
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_clock.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/export.h>
+
+#include "power.h"
+
+#define GENPD_RETRY_MAX_MS	250		/* Approximate */
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
+({								\
+	type (*__routine)(struct device *__d); 			\
+	type __ret = (type)0;					\
+								\
+	__routine = genpd->dev_ops.callback; 			\
+	if (__routine) {					\
+		__ret = __routine(dev); 			\
+	}							\
+	__ret;							\
+})
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+struct genpd_lock_ops {
+	void (*lock)(struct generic_pm_domain *genpd);
+	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+	int (*lock_interruptible)(struct generic_pm_domain *genpd);
+	void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+	mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+					int depth)
+{
+	mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+	return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+	return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+	.lock = genpd_lock_mtx,
+	.lock_nested = genpd_lock_nested_mtx,
+	.lock_interruptible = genpd_lock_interruptible_mtx,
+	.unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&genpd->slock, flags);
+	genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+					int depth)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+	genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&genpd->slock, flags);
+	genpd->lock_flags = flags;
+	return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+	__releases(&genpd->slock)
+{
+	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+	.lock = genpd_lock_spin,
+	.lock_nested = genpd_lock_nested_spin,
+	.lock_interruptible = genpd_lock_interruptible_spin,
+	.unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p)			p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p)			p->lock_ops->unlock(p)
+
+#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
+#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
+#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+		const struct generic_pm_domain *genpd)
+{
+	bool ret;
+
+	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+	/*
+	 * Warn once if an IRQ safe device is attached to a no sleep domain,
+	 * to indicate a suboptimal configuration for PM. For an always on
+	 * domain this isn't the case, thus don't warn.
+	 */
+	if (ret && !genpd_is_always_on(genpd))
+		dev_warn_once(dev, "PM domain %s will not be powered off\n",
+				genpd->name);
+
+	return ret;
+}
+
+/*
+ * Get the generic PM domain for a particular struct device.
+ * This validates the struct device pointer, the PM domain pointer,
+ * and checks that the PM domain pointer is a real generic PM domain.
+ * Any failure results in NULL being returned.
+ */
+static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
+{
+	struct generic_pm_domain *genpd = NULL, *gpd;
+
+	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
+		return NULL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (&gpd->domain == dev->pm_domain) {
+			genpd = gpd;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return genpd;
+}
+
+/*
+ * This should only be used where we are certain that the pm_domain
+ * attached to the device is a genpd domain.
+ */
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	if (IS_ERR_OR_NULL(dev->pm_domain))
+		return ERR_PTR(-EINVAL);
+
+	return pd_to_genpd(dev->pm_domain);
+}
+
+static int genpd_stop_dev(const struct generic_pm_domain *genpd,
+			  struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
+}
+
+static int genpd_start_dev(const struct generic_pm_domain *genpd,
+			   struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+	ktime_t delta, now;
+
+	now = ktime_get();
+	delta = ktime_sub(now, genpd->accounting_time);
+
+	/*
+	 * If genpd->status is active, the domain has just come out of the
+	 * "off" state, so the elapsed time was spent idle; account it as
+	 * idle time. Otherwise account it as on time.
+	 */
+	if (genpd->status == GPD_STATE_ACTIVE) {
+		int state_idx = genpd->state_idx;
+
+		genpd->states[state_idx].idle_time =
+			ktime_add(genpd->states[state_idx].idle_time, delta);
+	} else {
+		genpd->on_time = ktime_add(genpd->on_time, delta);
+	}
+
+	genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
+/**
+ * dev_pm_genpd_set_performance_state - Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance state needs to be set.
+ * @state: Target performance state of the device. This can be set to 0 when
+ *	   the device no longer has any performance state constraints (and so
+ *	   no longer participates in determining the target performance state
+ *	   of the genpd).
+ *
+ * It is assumed that the callers guarantee that the genpd isn't detached while
+ * this routine is being called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+	struct generic_pm_domain *genpd;
+	struct generic_pm_domain_data *gpd_data, *pd_data;
+	struct pm_domain_data *pdd;
+	unsigned int prev;
+	int ret = 0;
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -ENODEV;
+
+	if (unlikely(!genpd->set_performance_state))
+		return -EINVAL;
+
+	if (unlikely(!dev->power.subsys_data ||
+		     !dev->power.subsys_data->domain_data)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	genpd_lock(genpd);
+
+	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	prev = gpd_data->performance_state;
+	gpd_data->performance_state = state;
+
+	/* New requested state is the same as the max requested state */
+	if (state == genpd->performance_state)
+		goto unlock;
+
+	/* New requested state is higher than the max requested state */
+	if (state > genpd->performance_state)
+		goto update_state;
+
+	/* Traverse all devices within the domain */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		pd_data = to_gpd_data(pdd);
+
+		if (pd_data->performance_state > state)
+			state = pd_data->performance_state;
+	}
+
+	if (state == genpd->performance_state)
+		goto unlock;
+
+	/*
+	 * We aren't propagating performance state changes of a subdomain to
+	 * its masters as we don't have hardware that needs it. Moreover, the
+	 * performance states of a subdomain and its masters may not have a
+	 * one-to-one mapping and would require additional information. We can
+	 * get back to this once we have hardware that needs it. For that
+	 * reason, we don't have to consider the performance state of the
+	 * subdomains of genpd here.
+	 */
+
+update_state:
+	if (genpd_status_on(genpd)) {
+		ret = genpd->set_performance_state(genpd, state);
+		if (ret) {
+			gpd_data->performance_state = prev;
+			goto unlock;
+		}
+	}
+
+	genpd->performance_state = state;
+
+unlock:
+	genpd_unlock(genpd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
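+
+/*
+ * Example (editor's sketch, not part of the original source): a consumer
+ * driver raising and dropping a performance-state vote. The foo_* names and
+ * the state value 2 are hypothetical; real drivers typically derive the
+ * value from an OPP table.
+ *
+ *	static int foo_start_streaming(struct device *dev)
+ *	{
+ *		return dev_pm_genpd_set_performance_state(dev, 2);
+ *	}
+ *
+ *	static void foo_stop_streaming(struct device *dev)
+ *	{
+ *		dev_pm_genpd_set_performance_state(dev, 0);
+ *	}
+ */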
+
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+{
+	unsigned int state_idx = genpd->state_idx;
+	ktime_t time_start;
+	s64 elapsed_ns;
+	int ret;
+
+	if (!genpd->power_on)
+		return 0;
+
+	if (!timed)
+		return genpd->power_on(genpd);
+
+	time_start = ktime_get();
+	ret = genpd->power_on(genpd);
+	if (ret)
+		return ret;
+
+	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+
+	if (unlikely(genpd->set_performance_state)) {
+		ret = genpd->set_performance_state(genpd, genpd->performance_state);
+		if (ret) {
+			pr_warn("%s: Failed to set performance state %d (%d)\n",
+				genpd->name, genpd->performance_state, ret);
+		}
+	}
+
+	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
+		return ret;
+
+	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
+	genpd->max_off_time_changed = true;
+	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+		 genpd->name, "on", elapsed_ns);
+
+	return ret;
+}
+
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+{
+	unsigned int state_idx = genpd->state_idx;
+	ktime_t time_start;
+	s64 elapsed_ns;
+	int ret;
+
+	if (!genpd->power_off)
+		return 0;
+
+	if (!timed)
+		return genpd->power_off(genpd);
+
+	time_start = ktime_get();
+	ret = genpd->power_off(genpd);
+	if (ret == -EBUSY)
+		return ret;
+
+	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
+		return ret;
+
+	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
+	genpd->max_off_time_changed = true;
+	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+		 genpd->name, "off", elapsed_ns);
+
+	return ret;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of genpd_power_off() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+	queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
+ * the RPM status of the related device is in an intermediate state, not yet
+ * turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
+ * device to not be RPM_SUSPENDED, while it tries to power off the PM domain.
+ * @depth: nesting count for lockdep.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+			   unsigned int depth)
+{
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
+	unsigned int not_suspended = 0;
+
+	/*
+	 * Do not try to power off the domain in the following situations:
+	 * (1) The domain is already in the "power off" state.
+	 * (2) System suspend is in progress.
+	 */
+	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
+		return 0;
+
+	/*
+	 * Abort power off for the PM domain in the following situations:
+	 * (1) The domain is configured as always on.
+	 * (2) When the domain has a subdomain being powered on.
+	 */
+	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+		return -EBUSY;
+
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		enum pm_qos_flags_status stat;
+
+		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
+		if (stat > PM_QOS_FLAGS_NONE)
+			return -EBUSY;
+
+		/*
+		 * Do not allow PM domain to be powered off, when an IRQ safe
+		 * device is part of a non-IRQ safe domain.
+		 */
+		if (!pm_runtime_suspended(pdd->dev) ||
+			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+			not_suspended++;
+	}
+
+	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+		return -EBUSY;
+
+	if (genpd->gov && genpd->gov->power_down_ok) {
+		if (!genpd->gov->power_down_ok(&genpd->domain))
+			return -EAGAIN;
+	}
+
+	if (genpd->power_off) {
+		int ret;
+
+		if (atomic_read(&genpd->sd_count) > 0)
+			return -EBUSY;
+
+		/*
+		 * If sd_count turns positive at this point, one of the
+		 * subdomains hasn't managed to call genpd_power_on() for the
+		 * master yet after incrementing it.  In that case
+		 * genpd_power_on() will wait for us to drop the lock, so we
+		 * can call .power_off() and let genpd_power_on() restore power
+		 * for us (this shouldn't happen very often).
+		 */
+		ret = _genpd_power_off(genpd, true);
+		if (ret)
+			return ret;
+	}
+
+	genpd->status = GPD_STATE_POWER_OFF;
+	genpd_update_accounting(genpd);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
+	}
+
+	return 0;
+}
+
+/**
+ * genpd_power_on - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
+ *
+ * Restore power to @genpd and all of its masters so that it is possible to
+ * resume a device belonging to it.
+ */
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
+{
+	struct gpd_link *link;
+	int ret = 0;
+
+	if (genpd_status_on(genpd))
+		return 0;
+
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		struct generic_pm_domain *master = link->master;
+
+		genpd_sd_counter_inc(master);
+
+		genpd_lock_nested(master, depth + 1);
+		ret = genpd_power_on(master, depth + 1);
+		genpd_unlock(master);
+
+		if (ret) {
+			genpd_sd_counter_dec(master);
+			goto err;
+		}
+	}
+
+	ret = _genpd_power_on(genpd, true);
+	if (ret)
+		goto err;
+
+	genpd->status = GPD_STATE_ACTIVE;
+	genpd_update_accounting(genpd);
+
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link,
+					&genpd->slave_links,
+					slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
+	}
+
+	return ret;
+}
+
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+				     unsigned long val, void *ptr)
+{
+	struct generic_pm_domain_data *gpd_data;
+	struct device *dev;
+
+	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+	dev = gpd_data->base.dev;
+
+	for (;;) {
+		struct generic_pm_domain *genpd;
+		struct pm_domain_data *pdd;
+
+		spin_lock_irq(&dev->power.lock);
+
+		pdd = dev->power.subsys_data ?
+				dev->power.subsys_data->domain_data : NULL;
+		if (pdd) {
+			to_gpd_data(pdd)->td.constraint_changed = true;
+			genpd = dev_to_genpd(dev);
+		} else {
+			genpd = ERR_PTR(-ENODATA);
+		}
+
+		spin_unlock_irq(&dev->power.lock);
+
+		if (!IS_ERR(genpd)) {
+			genpd_lock(genpd);
+			genpd->max_off_time_changed = true;
+			genpd_unlock(genpd);
+		}
+
+		dev = dev->parent;
+		if (!dev || dev->power.ignore_children)
+			break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+	genpd_lock(genpd);
+	genpd_power_off(genpd, false, 0);
+	genpd_unlock(genpd);
+}
+
+/**
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_suspend;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_suspend;
+	else
+		cb = NULL;
+
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_suspend;
+
+	return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_resume;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_resume;
+	else
+		cb = NULL;
+
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_resume;
+
+	return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_suspend(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	bool (*suspend_ok)(struct device *__dev);
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
+	ktime_t time_start;
+	s64 elapsed_ns;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * A runtime PM centric subsystem/driver may re-use the runtime PM
+	 * callbacks for purposes other than runtime PM. In those scenarios
+	 * runtime PM is disabled. Under these circumstances, we shall skip
+	 * validating/measuring the PM QoS latency.
+	 */
+	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+	if (runtime_pm && suspend_ok && !suspend_ok(dev))
+		return -EBUSY;
+
+	/* Measure suspend latency. */
+	time_start = 0;
+	if (runtime_pm)
+		time_start = ktime_get();
+
+	ret = __genpd_runtime_suspend(dev);
+	if (ret)
+		return ret;
+
+	ret = genpd_stop_dev(genpd, dev);
+	if (ret) {
+		__genpd_runtime_resume(dev);
+		return ret;
+	}
+
+	/* Update suspend latency value if the measured time exceeds it. */
+	if (runtime_pm) {
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > td->suspend_latency_ns) {
+			td->suspend_latency_ns = elapsed_ns;
+			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+				elapsed_ns);
+			genpd->max_off_time_changed = true;
+			td->constraint_changed = true;
+		}
+	}
+
+	/*
+	 * If power.irq_safe is set, this routine may be run with
+	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
+	 */
+	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
+		return 0;
+
+	genpd_lock(genpd);
+	genpd_power_off(genpd, true, 0);
+	genpd_unlock(genpd);
+
+	return 0;
+}
+
+/**
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_resume(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
+	ktime_t time_start;
+	s64 elapsed_ns;
+	int ret;
+	bool timed = true;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * As we don't power off a non-IRQ-safe domain that holds an IRQ-safe
+	 * device, we don't need to restore power to it.
+	 */
+	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
+		timed = false;
+		goto out;
+	}
+
+	genpd_lock(genpd);
+	ret = genpd_power_on(genpd, 0);
+	genpd_unlock(genpd);
+
+	if (ret)
+		return ret;
+
+ out:
+	/* Measure resume latency. */
+	time_start = 0;
+	if (timed && runtime_pm)
+		time_start = ktime_get();
+
+	ret = genpd_start_dev(genpd, dev);
+	if (ret)
+		goto err_poweroff;
+
+	ret = __genpd_runtime_resume(dev);
+	if (ret)
+		goto err_stop;
+
+	/* Update resume latency value if the measured time exceeds it. */
+	if (timed && runtime_pm) {
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > td->resume_latency_ns) {
+			td->resume_latency_ns = elapsed_ns;
+			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
+				elapsed_ns);
+			genpd->max_off_time_changed = true;
+			td->constraint_changed = true;
+		}
+	}
+
+	return 0;
+
+err_stop:
+	genpd_stop_dev(genpd, dev);
+err_poweroff:
+	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
+		genpd_lock(genpd);
+		genpd_power_off(genpd, true, 0);
+		genpd_unlock(genpd);
+	}
+
+	return ret;
+}
+
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+	pd_ignore_unused = true;
+	return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
+/**
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
+ */
+static int __init genpd_power_off_unused(void)
+{
+	struct generic_pm_domain *genpd;
+
+	if (pd_ignore_unused) {
+		pr_warn("genpd: Not disabling unused power domains\n");
+		return 0;
+	}
+
+	mutex_lock(&gpd_list_lock);
+
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+		genpd_queue_power_off_work(genpd);
+
+	mutex_unlock(&gpd_list_lock);
+
+	return 0;
+}
+late_initcall(genpd_power_off_unused);
+
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
+
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+	const struct generic_pm_domain *gpd;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return false;
+
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+		if (gpd == genpd)
+			return true;
+
+	return false;
+}
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * @genpd: PM domain to power off, if possible.
+ * @use_lock: take the masters' locks while walking them.
+ * @depth: nesting count for lockdep.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+				 unsigned int depth)
+{
+	struct gpd_link *link;
+
+	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
+		return;
+
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
+		return;
+
+	/* Choose the deepest state when suspending */
+	genpd->state_idx = genpd->state_count - 1;
+	if (_genpd_power_off(genpd, false))
+		return;
+
+	genpd->status = GPD_STATE_POWER_OFF;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_off(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
+	}
+}
+
+/**
+ * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ * @use_lock: take the masters' locks while walking them.
+ * @depth: nesting count for lockdep.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+				unsigned int depth)
+{
+	struct gpd_link *link;
+
+	if (genpd_status_on(genpd))
+		return;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_on(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
+	}
+
+	_genpd_power_on(genpd, false);
+
+	genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system).  In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev,
+			  const struct generic_pm_domain *genpd)
+{
+	bool active_wakeup;
+
+	if (!device_can_wakeup(dev))
+		return false;
+
+	active_wakeup = genpd_is_active_wakeup(genpd);
+	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int genpd_prepare(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * If a wakeup request is pending for the device, it should be woken up
+	 * at this point and a system wakeup event should be reported if it's
+	 * set up to wake up the system from sleep states.
+	 */
+	if (resume_needed(dev, genpd))
+		pm_runtime_resume(dev);
+
+	genpd_lock(genpd);
+
+	if (genpd->prepared_count++ == 0)
+		genpd->suspended_count = 0;
+
+	genpd_unlock(genpd);
+
+	ret = pm_generic_prepare(dev);
+	if (ret < 0) {
+		genpd_lock(genpd);
+
+		genpd->prepared_count--;
+
+		genpd_unlock(genpd);
+	}
+
+	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
+	return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * genpd_finish_suspend - Completion of suspend or hibernation of a device in
+ *   an I/O PM domain.
+ * @dev: Device to suspend.
+ * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_finish_suspend(struct device *dev, bool poweroff)
+{
+	struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (poweroff)
+		ret = pm_generic_poweroff_noirq(dev);
+	else
+		ret = pm_generic_suspend_noirq(dev);
+	if (ret)
+		return ret;
+
+	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+		return 0;
+
+	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+	    !pm_runtime_status_suspended(dev)) {
+		ret = genpd_stop_dev(genpd, dev);
+		if (ret) {
+			if (poweroff)
+				pm_generic_restore_noirq(dev);
+			else
+				pm_generic_resume_noirq(dev);
+			return ret;
+		}
+	}
+
+	genpd_lock(genpd);
+	genpd->suspended_count++;
+	genpd_sync_power_off(genpd, true, 0);
+	genpd_unlock(genpd);
+
+	return 0;
+}
+
+/**
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_suspend_noirq(struct device *dev)
+{
+	dev_dbg(dev, "%s()\n", __func__);
+
+	return genpd_finish_suspend(dev, false);
+}
+
+/**
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Restore power to the device's PM domain, if necessary, and start the device.
+ */
+static int genpd_resume_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+		return pm_generic_resume_noirq(dev);
+
+	genpd_lock(genpd);
+	genpd_sync_power_on(genpd, true, 0);
+	genpd->suspended_count--;
+	genpd_unlock(genpd);
+
+	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+	    !pm_runtime_status_suspended(dev)) {
+		ret = genpd_start_dev(genpd, dev);
+		if (ret)
+			return ret;
+	}
+
+	return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int genpd_freeze_noirq(struct device *dev)
+{
+	const struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	ret = pm_generic_freeze_noirq(dev);
+	if (ret)
+		return ret;
+
+	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+	    !pm_runtime_status_suspended(dev))
+		ret = genpd_stop_dev(genpd, dev);
+
+	return ret;
+}
+
+/**
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int genpd_thaw_noirq(struct device *dev)
+{
+	const struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+	    !pm_runtime_status_suspended(dev)) {
+		ret = genpd_start_dev(genpd, dev);
+		if (ret)
+			return ret;
+	}
+
+	return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
+ *   I/O PM domain.
+ * @dev: Device to poweroff.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_poweroff_noirq(struct device *dev)
+{
+	dev_dbg(dev, "%s()\n", __func__);
+
+	return genpd_finish_suspend(dev, true);
+}
+
+/**
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Make sure the domain will be in the same power state as it was before the
+ * hibernation the system is resuming from, and start the device if necessary.
+ */
+static int genpd_restore_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * At this point suspended_count == 0 means we are being run for the
+	 * first time for the given domain in the present cycle.
+	 */
+	genpd_lock(genpd);
+	if (genpd->suspended_count++ == 0)
+		/*
+		 * The boot kernel might put the domain into arbitrary state,
+		 * so make it appear as powered off to genpd_sync_power_on(),
+		 * so that it tries to power it on in case it was really off.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+
+	genpd_sync_power_on(genpd, true, 0);
+	genpd_unlock(genpd);
+
+	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+	    !pm_runtime_status_suspended(dev)) {
+		ret = genpd_start_dev(genpd, dev);
+		if (ret)
+			return ret;
+	}
+
+	return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void genpd_complete(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return;
+
+	pm_generic_complete(dev);
+
+	genpd_lock(genpd);
+
+	genpd->prepared_count--;
+	if (!genpd->prepared_count)
+		genpd_queue_power_off_work(genpd);
+
+	genpd_unlock(genpd);
+}
+
+/**
+ * genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ * @suspend: Whether to power the domain off (true) or back on (false).
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+static void genpd_syscore_switch(struct device *dev, bool suspend)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = dev_to_genpd(dev);
+	if (!genpd_present(genpd))
+		return;
+
+	if (suspend) {
+		genpd->suspended_count++;
+		genpd_sync_power_off(genpd, false, 0);
+	} else {
+		genpd_sync_power_on(genpd, false, 0);
+		genpd->suspended_count--;
+	}
+}
+
+void pm_genpd_syscore_poweroff(struct device *dev)
+{
+	genpd_syscore_switch(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
+
+void pm_genpd_syscore_poweron(struct device *dev)
+{
+	genpd_syscore_switch(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
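+
+/*
+ * Example (editor's sketch, not part of the original source): a timer that
+ * must keep working across syscore suspend switches its domain explicitly
+ * from its clock_event_device callbacks. The foo_* helpers are hypothetical.
+ *
+ *	static void foo_timer_suspend(struct clock_event_device *ced)
+ *	{
+ *		foo_timer_stop(ced);
+ *		pm_genpd_syscore_poweroff(foo_timer_to_dev(ced));
+ *	}
+ *
+ *	static void foo_timer_resume(struct clock_event_device *ced)
+ *	{
+ *		pm_genpd_syscore_poweron(foo_timer_to_dev(ced));
+ *		foo_timer_start(ced);
+ *	}
+ */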
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define genpd_prepare		NULL
+#define genpd_suspend_noirq	NULL
+#define genpd_resume_noirq	NULL
+#define genpd_freeze_noirq	NULL
+#define genpd_thaw_noirq	NULL
+#define genpd_poweroff_noirq	NULL
+#define genpd_restore_noirq	NULL
+#define genpd_complete		NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+					struct gpd_timing_data *td)
+{
+	struct generic_pm_domain_data *gpd_data;
+	int ret;
+
+	ret = dev_pm_get_subsys_data(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
+		ret = -ENOMEM;
+		goto err_put;
+	}
+
+	if (td)
+		gpd_data->td = *td;
+
+	gpd_data->base.dev = dev;
+	gpd_data->td.constraint_changed = true;
+	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data->domain_data) {
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+
+	spin_unlock_irq(&dev->power.lock);
+
+	return gpd_data;
+
+ err_free:
+	spin_unlock_irq(&dev->power.lock);
+	kfree(gpd_data);
+ err_put:
+	dev_pm_put_subsys_data(dev);
+	return ERR_PTR(ret);
+}
+
+static void genpd_free_dev_data(struct device *dev,
+				struct generic_pm_domain_data *gpd_data)
+{
+	spin_lock_irq(&dev->power.lock);
+
+	dev->power.subsys_data->domain_data = NULL;
+
+	spin_unlock_irq(&dev->power.lock);
+
+	kfree(gpd_data);
+	dev_pm_put_subsys_data(dev);
+}
+
+static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+			    struct gpd_timing_data *td)
+{
+	struct generic_pm_domain_data *gpd_data;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+		return -EINVAL;
+
+	gpd_data = genpd_alloc_dev_data(dev, td);
+	if (IS_ERR(gpd_data))
+		return PTR_ERR(gpd_data);
+
+	genpd_lock(genpd);
+
+	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
+	if (ret)
+		goto out;
+
+	dev_pm_domain_set(dev, &genpd->domain);
+
+	genpd->device_count++;
+	genpd->max_off_time_changed = true;
+
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+
+ out:
+	genpd_unlock(genpd);
+
+	if (ret)
+		genpd_free_dev_data(dev, gpd_data);
+	else
+		dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+	ret = genpd_add_device(genpd, dev, NULL);
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_device);
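+
+/*
+ * Example (editor's sketch, not part of the original source): binding a
+ * platform device to an already initialized domain from probe. foo_pd and
+ * foo_probe() are hypothetical; see the pm_genpd_init() sketch further down
+ * for the domain setup.
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
+ *
+ *		if (ret)
+ *			return ret;
+ *
+ *		pm_runtime_enable(&pdev->dev);
+ *		return 0;
+ *	}
+ */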
+
+static int genpd_remove_device(struct generic_pm_domain *genpd,
+			       struct device *dev)
+{
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
+	int ret = 0;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	pdd = dev->power.subsys_data->domain_data;
+	gpd_data = to_gpd_data(pdd);
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+
+	genpd_lock(genpd);
+
+	if (genpd->prepared_count > 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	genpd->device_count--;
+	genpd->max_off_time_changed = true;
+
+	if (genpd->detach_dev)
+		genpd->detach_dev(genpd, dev);
+
+	dev_pm_domain_set(dev, NULL);
+
+	list_del_init(&pdd->list_node);
+
+	genpd_unlock(genpd);
+
+	genpd_free_dev_data(dev, gpd_data);
+
+	return 0;
+
+ out:
+	genpd_unlock(genpd);
+	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct device *dev)
+{
+	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
+
+	if (!genpd)
+		return -EINVAL;
+
+	return genpd_remove_device(genpd, dev);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
+
+static int genpd_add_subdomain(struct generic_pm_domain *genpd,
+			       struct generic_pm_domain *subdomain)
+{
+	struct gpd_link *link, *itr;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+	    || genpd == subdomain)
+		return -EINVAL;
+
+	/*
+	 * If the domain can be powered on/off in an IRQ safe
+	 * context, ensure that the subdomain can also be
+	 * powered on/off in that context.
+	 */
+	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+				genpd->name, subdomain->name);
+		return -EINVAL;
+	}
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	genpd_lock(subdomain);
+	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(itr, &genpd->master_links, master_node) {
+		if (itr->slave == subdomain && itr->master == genpd) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
+	if (genpd_status_on(subdomain))
+		genpd_sd_counter_inc(genpd);
+
+ out:
+	genpd_unlock(genpd);
+	genpd_unlock(subdomain);
+	if (ret)
+		kfree(link);
+	return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+			   struct generic_pm_domain *subdomain)
+{
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+	ret = genpd_add_subdomain(genpd, subdomain);
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
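+
+/*
+ * Example (editor's sketch, not part of the original source): nesting two
+ * hypothetical domains. Once linked, powering on foo_child_pd keeps
+ * foo_parent_pd on via the master's sd_count.
+ *
+ *	pm_genpd_init(&foo_parent_pd, NULL, true);
+ *	pm_genpd_init(&foo_child_pd, NULL, true);
+ *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
+ */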
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @subdomain: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+			      struct generic_pm_domain *subdomain)
+{
+	struct gpd_link *l, *link;
+	int ret = -EINVAL;
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+		return -EINVAL;
+
+	genpd_lock(subdomain);
+	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+			subdomain->name);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
+			continue;
+
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
+		if (genpd_status_on(subdomain))
+			genpd_sd_counter_dec(genpd);
+
+		ret = 0;
+		break;
+	}
+
+out:
+	genpd_unlock(genpd);
+	genpd_unlock(subdomain);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+	struct genpd_power_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	genpd->states = state;
+	genpd->state_count = 1;
+	genpd->free = state;
+
+	return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+		spin_lock_init(&genpd->slock);
+		genpd->lock_ops = &genpd_spin_ops;
+	} else {
+		mutex_init(&genpd->mlock);
+		genpd->lock_ops = &genpd_mtx_ops;
+	}
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial value of the domain's power_is_off field.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
+ */
+int pm_genpd_init(struct generic_pm_domain *genpd,
+		  struct dev_power_governor *gov, bool is_off)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
+	INIT_LIST_HEAD(&genpd->dev_list);
+	genpd_lock_init(genpd);
+	genpd->gov = gov;
+	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+	atomic_set(&genpd->sd_count, 0);
+	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+	genpd->device_count = 0;
+	genpd->max_off_time_ns = -1;
+	genpd->max_off_time_changed = true;
+	genpd->provider = NULL;
+	genpd->has_provider = false;
+	genpd->accounting_time = ktime_get();
+	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
+	genpd->domain.ops.prepare = genpd_prepare;
+	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+	genpd->domain.ops.complete = genpd_complete;
+
+	if (genpd->flags & GENPD_FLAG_PM_CLK) {
+		genpd->dev_ops.stop = pm_clk_suspend;
+		genpd->dev_ops.start = pm_clk_resume;
+	}
+
+	/* Always-on domains must be powered on at initialization. */
+	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+		return -EINVAL;
+
+	/* Use only one "off" state if there were no states declared */
+	if (genpd->state_count == 0) {
+		ret = genpd_set_default_power_state(genpd);
+		if (ret)
+			return ret;
+	}
+
+	device_initialize(&genpd->dev);
+	dev_set_name(&genpd->dev, "%s", genpd->name);
+
+	mutex_lock(&gpd_list_lock);
+	list_add(&genpd->gpd_list_node, &gpd_list);
+	mutex_unlock(&gpd_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_init);
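+
+/*
+ * Example (editor's sketch, not part of the original source): minimal domain
+ * initialization with power callbacks. The foo_* names are hypothetical; the
+ * domain is registered initially powered off, with no governor.
+ *
+ *	static int foo_pd_power_on(struct generic_pm_domain *pd)
+ *	{
+ *		return foo_set_power(true);
+ *	}
+ *
+ *	static int foo_pd_power_off(struct generic_pm_domain *pd)
+ *	{
+ *		return foo_set_power(false);
+ *	}
+ *
+ *	static struct generic_pm_domain foo_pd = {
+ *		.name = "foo",
+ *		.power_on = foo_pd_power_on,
+ *		.power_off = foo_pd_power_off,
+ *	};
+ *
+ *	ret = pm_genpd_init(&foo_pd, NULL, true);
+ */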
+
+static int genpd_remove(struct generic_pm_domain *genpd)
+{
+	struct gpd_link *l, *link;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_lock(genpd);
+
+	if (genpd->has_provider) {
+		genpd_unlock(genpd);
+		pr_err("Provider present, unable to remove %s\n", genpd->name);
+		return -EBUSY;
+	}
+
+	if (!list_empty(&genpd->master_links) || genpd->device_count) {
+		genpd_unlock(genpd);
+		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
+		return -EBUSY;
+	}
+
+	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
+	}
+
+	list_del(&genpd->gpd_list_node);
+	genpd_unlock(genpd);
+	cancel_work_sync(&genpd->power_off_work);
+	kfree(genpd->free);
+	pr_debug("%s: removed %s\n", __func__, genpd->name);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_remove - Remove a generic I/O PM domain
+ * @genpd: Pointer to PM domain that is to be removed.
+ *
+ * To remove the PM domain, this function:
+ *  - Removes the PM domain as a subdomain from any parent domains,
+ *    if it was added as one.
+ *  - Removes the PM domain from the list of registered PM domains.
+ *
+ * The PM domain will only be removed if the associated provider has been
+ * removed, it is not a parent to any other PM domain, and it has no devices
+ * associated with it.
+ */
+int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+	ret = genpd_remove(genpd);
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+
+/*
+ * Device Tree based PM domain providers.
+ *
+ * The code below implements generic device tree based PM domain providers that
+ * bind device tree nodes with generic PM domains registered in the system.
+ *
+ * Any driver that registers generic PM domains and needs to support binding of
+ * devices to these domains is supposed to register a PM domain provider, which
+ * maps a PM domain specifier retrieved from the device tree to a PM domain.
+ *
+ * Two simple mapping functions have been provided for convenience:
+ *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
+ *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
+ *    index.
+ */
+
+/**
+ * struct of_genpd_provider - PM domain provider registration structure
+ * @link: Entry in global list of PM domain providers
+ * @node: Pointer to device tree node of PM domain provider
+ * @xlate: Provider-specific xlate callback mapping a set of specifier cells
+ *         into a PM domain.
+ * @data: context pointer to be passed into @xlate callback
+ */
+struct of_genpd_provider {
+	struct list_head link;
+	struct device_node *node;
+	genpd_xlate_t xlate;
+	void *data;
+};
+
+/* List of registered PM domain providers. */
+static LIST_HEAD(of_genpd_providers);
+/* Mutex to protect the list above. */
+static DEFINE_MUTEX(of_genpd_mutex);
+
+/**
+ * genpd_xlate_simple() - Xlate function for direct node-domain mapping
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct generic_pm_domain
+ *
+ * This is a generic xlate function that can be used to model PM domains that
+ * have their own device tree nodes. The private data of the xlate function
+ * needs to be a valid pointer to a struct generic_pm_domain.
+ */
+static struct generic_pm_domain *genpd_xlate_simple(
+					struct of_phandle_args *genpdspec,
+					void *data)
+{
+	return data;
+}
+
+/**
+ * genpd_xlate_onecell() - Xlate function using a single index.
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct genpd_onecell_data
+ *
+ * This is a generic xlate function that can be used to model simple PM domain
+ * controllers that have one device tree node and provide multiple PM domains.
+ * A single cell is used as an index into an array of PM domains specified in
+ * the genpd_onecell_data struct when registering the provider.
+ */
+static struct generic_pm_domain *genpd_xlate_onecell(
+					struct of_phandle_args *genpdspec,
+					void *data)
+{
+	struct genpd_onecell_data *genpd_data = data;
+	unsigned int idx = genpdspec->args[0];
+
+	if (genpdspec->args_count != 1)
+		return ERR_PTR(-EINVAL);
+
+	if (idx >= genpd_data->num_domains) {
+		pr_err("%s: invalid domain index %u\n", __func__, idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!genpd_data->domains[idx])
+		return ERR_PTR(-ENOENT);
+
+	return genpd_data->domains[idx];
+}
+
+/**
+ * genpd_add_provider() - Register a PM domain provider for a node
+ * @np: Device node pointer associated with the PM domain provider.
+ * @xlate: Callback for decoding PM domain from phandle arguments.
+ * @data: Context pointer for @xlate callback.
+ */
+static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+			      void *data)
+{
+	struct of_genpd_provider *cp;
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	cp->node = of_node_get(np);
+	cp->data = data;
+	cp->xlate = xlate;
+
+	mutex_lock(&of_genpd_mutex);
+	list_add(&cp->link, &of_genpd_providers);
+	mutex_unlock(&of_genpd_mutex);
+	pr_debug("Added domain provider from %pOF\n", np);
+
+	return 0;
+}
+
+/**
+ * of_genpd_add_provider_simple() - Register a simple PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @genpd: Pointer to PM domain associated with the PM domain provider.
+ */
+int of_genpd_add_provider_simple(struct device_node *np,
+				 struct generic_pm_domain *genpd)
+{
+	int ret = -EINVAL;
+
+	if (!np || !genpd)
+		return -EINVAL;
+
+	mutex_lock(&gpd_list_lock);
+
+	if (!genpd_present(genpd))
+		goto unlock;
+
+	genpd->dev.of_node = np;
+
+	/* Parse genpd OPP table */
+	if (genpd->set_performance_state) {
+		ret = dev_pm_opp_of_add_table(&genpd->dev);
+		if (ret) {
+			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+				ret);
+			goto unlock;
+		}
+	}
+
+	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
+	if (ret) {
+		if (genpd->set_performance_state)
+			dev_pm_opp_of_remove_table(&genpd->dev);
+
+		goto unlock;
+	}
+
+	genpd->provider = &np->fwnode;
+	genpd->has_provider = true;
+
+unlock:
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
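+
+/*
+ * Example (editor's sketch, not part of the original source): registering a
+ * single-domain provider for a node using the standard power-domains
+ * binding. Node and driver names are hypothetical.
+ *
+ *	Device tree:
+ *		pd: power-controller@f0000000 {
+ *			compatible = "vendor,foo-pd";
+ *			#power-domain-cells = <0>;
+ *		};
+ *
+ *	Driver:
+ *		pm_genpd_init(&foo_pd, NULL, true);
+ *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
+ */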
+
+/**
+ * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @data: Pointer to the data associated with the PM domain provider.
+ */
+int of_genpd_add_provider_onecell(struct device_node *np,
+				  struct genpd_onecell_data *data)
+{
+	struct generic_pm_domain *genpd;
+	unsigned int i;
+	int ret = -EINVAL;
+
+	if (!np || !data)
+		return -EINVAL;
+
+	mutex_lock(&gpd_list_lock);
+
+	if (!data->xlate)
+		data->xlate = genpd_xlate_onecell;
+
+	for (i = 0; i < data->num_domains; i++) {
+		genpd = data->domains[i];
+
+		if (!genpd)
+			continue;
+		if (!genpd_present(genpd))
+			goto error;
+
+		genpd->dev.of_node = np;
+
+		/* Parse genpd OPP table */
+		if (genpd->set_performance_state) {
+			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
+			if (ret) {
+				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+					i, ret);
+				goto error;
+			}
+		}
+
+		genpd->provider = &np->fwnode;
+		genpd->has_provider = true;
+	}
+
+	ret = genpd_add_provider(np, data->xlate, data);
+	if (ret < 0)
+		goto error;
+
+	mutex_unlock(&gpd_list_lock);
+
+	return 0;
+
+error:
+	while (i--) {
+		genpd = data->domains[i];
+
+		if (!genpd)
+			continue;
+
+		genpd->provider = NULL;
+		genpd->has_provider = false;
+
+		if (genpd->set_performance_state)
+			dev_pm_opp_of_remove_table(&genpd->dev);
+	}
+
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
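+
+/*
+ * Example (editor's sketch, not part of the original source): a controller
+ * exposing several domains through one node with #power-domain-cells = <1>,
+ * where the cell selects the array index. The foo_* names are hypothetical.
+ *
+ *	static struct generic_pm_domain *foo_domains[] = {
+ *		[0] = &foo_cpu_pd,
+ *		[1] = &foo_gpu_pd,
+ *	};
+ *
+ *	static struct genpd_onecell_data foo_pd_data = {
+ *		.domains = foo_domains,
+ *		.num_domains = ARRAY_SIZE(foo_domains),
+ *	};
+ *
+ *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
+ */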
+
+/**
+ * of_genpd_del_provider() - Remove a previously registered PM domain provider
+ * @np: Device node pointer associated with the PM domain provider
+ */
+void of_genpd_del_provider(struct device_node *np)
+{
+	struct of_genpd_provider *cp, *tmp;
+	struct generic_pm_domain *gpd;
+
+	mutex_lock(&gpd_list_lock);
+	mutex_lock(&of_genpd_mutex);
+	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
+		if (cp->node == np) {
+			/*
+			 * For each PM domain associated with the
+			 * provider, set the 'has_provider' to false
+			 * so that the PM domain can be safely removed.
+			 */
+			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+				if (gpd->provider == &np->fwnode) {
+					gpd->has_provider = false;
+
+					if (!gpd->set_performance_state)
+						continue;
+
+					dev_pm_opp_of_remove_table(&gpd->dev);
+				}
+			}
+
+			list_del(&cp->link);
+			of_node_put(cp->node);
+			kfree(cp);
+			break;
+		}
+	}
+	mutex_unlock(&of_genpd_mutex);
+	mutex_unlock(&gpd_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_genpd_del_provider);
+
+/**
+ * genpd_get_from_provider() - Look-up PM domain
+ * @genpdspec: OF phandle args to use for look-up
+ *
+ * Looks for a PM domain provider under the node specified by @genpdspec and if
+ * found, uses xlate function of the provider to map phandle args to a PM
+ * domain.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
+ * on failure.
+ */
+static struct generic_pm_domain *genpd_get_from_provider(
+					struct of_phandle_args *genpdspec)
+{
+	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+	struct of_genpd_provider *provider;
+
+	if (!genpdspec)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&of_genpd_mutex);
+
+	/* Check if we have such a provider in our list */
+	list_for_each_entry(provider, &of_genpd_providers, link) {
+		if (provider->node == genpdspec->np)
+			genpd = provider->xlate(genpdspec, provider->data);
+		if (!IS_ERR(genpd))
+			break;
+	}
+
+	mutex_unlock(&of_genpd_mutex);
+
+	return genpd;
+}
+
+/**
+ * of_genpd_add_device() - Add a device to an I/O PM domain
+ * @genpdspec: OF phandle args to use for look-up PM domain
+ * @dev: Device to be added.
+ *
+ * Looks up an I/O PM domain based upon the phandle args provided and adds
+ * the device to the PM domain. Returns a negative error code on failure.
+ */
+int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+
+	genpd = genpd_get_from_provider(genpdspec);
+	if (IS_ERR(genpd)) {
+		ret = PTR_ERR(genpd);
+		goto out;
+	}
+
+	ret = genpd_add_device(genpd, dev, NULL);
+
+out:
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_device);
+
+/**
+ * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and subdomain based upon the phandle args
+ * provided and adds the subdomain to the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
+			   struct of_phandle_args *subdomain_spec)
+{
+	struct generic_pm_domain *parent, *subdomain;
+	int ret;
+
+	mutex_lock(&gpd_list_lock);
+
+	parent = genpd_get_from_provider(parent_spec);
+	if (IS_ERR(parent)) {
+		ret = PTR_ERR(parent);
+		goto out;
+	}
+
+	subdomain = genpd_get_from_provider(subdomain_spec);
+	if (IS_ERR(subdomain)) {
+		ret = PTR_ERR(subdomain);
+		goto out;
+	}
+
+	ret = genpd_add_subdomain(parent, subdomain);
+
+out:
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
+
+/**
+ * of_genpd_remove_last - Remove the last PM domain registered for a provider
+ * @np: Pointer to the device node associated with the provider
+ *
+ * Find the last PM domain that was added by a particular provider and
+ * remove this PM domain from the list of PM domains. The provider is
+ * identified by the device node that is passed. The PM domain will only be
+ * removed if the provider associated with the domain has been removed.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or
+ * ERR_PTR() on failure.
+ */
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
+{
+	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
+	int ret;
+
+	if (IS_ERR_OR_NULL(np))
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
+		if (gpd->provider == &np->fwnode) {
+			ret = genpd_remove(gpd);
+			genpd = ret ? ERR_PTR(ret) : gpd;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return genpd;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_last);
+
+static void genpd_release_dev(struct device *dev)
+{
+	kfree(dev);
+}
+
+static struct bus_type genpd_bus_type = {
+	.name		= "genpd",
+};
+
+/**
+ * genpd_dev_pm_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Currently not used
+ *
+ * Try to locate a corresponding generic PM domain, which the device was
+ * attached to previously. If such is found, the device is detached from it.
+ */
+static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+{
+	struct generic_pm_domain *pd;
+	unsigned int i;
+	int ret = 0;
+
+	pd = dev_to_genpd(dev);
+	if (IS_ERR(pd))
+		return;
+
+	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
+	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
+		ret = genpd_remove_device(pd, dev);
+		if (ret != -EAGAIN)
+			break;
+
+		mdelay(i);
+		cond_resched();
+	}
+
+	if (ret < 0) {
+		dev_err(dev, "failed to remove from PM domain %s: %d",
+			pd->name, ret);
+		return;
+	}
+
+	/* Check if PM domain can be powered off after removing this device. */
+	genpd_queue_power_off_work(pd);
+
+	/* Unregister the device if it was created by genpd. */
+	if (dev->bus == &genpd_bus_type)
+		device_unregister(dev);
+}
+
+static void genpd_dev_pm_sync(struct device *dev)
+{
+	struct generic_pm_domain *pd;
+
+	pd = dev_to_genpd(dev);
+	if (IS_ERR(pd))
+		return;
+
+	genpd_queue_power_off_work(pd);
+}
+
+static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+				 unsigned int index, bool power_on)
+{
+	struct of_phandle_args pd_args;
+	struct generic_pm_domain *pd;
+	int ret;
+
+	ret = of_parse_phandle_with_args(np, "power-domains",
+				"#power-domain-cells", index, &pd_args);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&gpd_list_lock);
+	pd = genpd_get_from_provider(&pd_args);
+	of_node_put(pd_args.np);
+	if (IS_ERR(pd)) {
+		mutex_unlock(&gpd_list_lock);
+		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
+			__func__, PTR_ERR(pd));
+		return driver_deferred_probe_check_state(dev);
+	}
+
+	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
+
+	ret = genpd_add_device(pd, dev, NULL);
+	mutex_unlock(&gpd_list_lock);
+
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to add to PM domain %s: %d",
+				pd->name, ret);
+		return ret;
+	}
+
+	dev->pm_domain->detach = genpd_dev_pm_detach;
+	dev->pm_domain->sync = genpd_dev_pm_sync;
+
+	if (power_on) {
+		genpd_lock(pd);
+		ret = genpd_power_on(pd, 0);
+		genpd_unlock(pd);
+	}
+
+	if (ret)
+		genpd_remove_device(pd, dev);
+
+	return ret ? -EPROBE_DEFER : 1;
+}
+
+/**
+ * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
+ * @dev: Device to attach.
+ *
+ * Parse the device's OF node to find a PM domain specifier. If such is found,
+ * attaches the device to the retrieved pm_domain ops.
+ *
+ * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
+ * need a PM domain or when multiple power-domains exist for it, else a
+ * negative error code. Note that if a power-domain exists for the device, but
+ * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
+ * that the device is not probed and to re-try again later.
+ */
+int genpd_dev_pm_attach(struct device *dev)
+{
+	if (!dev->of_node)
+		return 0;
+
+	/*
+	 * Devices with multiple PM domains must be attached separately, as we
+	 * can only attach one PM domain per device.
+	 */
+	if (of_count_phandle_with_args(dev->of_node, "power-domains",
+				       "#power-domain-cells") != 1)
+		return 0;
+
+	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
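+
+/*
+ * Example (editor's sketch, not part of the original source): the consumer
+ * side in the device tree. With a single "power-domains" entry the bus code
+ * reaches this function via dev_pm_domain_attach() at probe time, so no
+ * driver code is needed. Node names are hypothetical.
+ *
+ *	codec@4 {
+ *		compatible = "vendor,foo-codec";
+ *		power-domains = <&pd>;
+ *	};
+ */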
+
+/**
+ * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * Parse the device's OF node to find a PM domain specifier at the provided
+ * @index. If such is found, creates a virtual device and attaches it to the
+ * retrieved pm_domain ops. To deal with detaching of the virtual device, the
+ * ->detach() callback in the struct dev_pm_domain is assigned to
+ * genpd_dev_pm_detach().
+ *
+ * Returns the created virtual device if a PM domain is successfully attached,
+ * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
+ * failure. If a power-domain exists for the device, but cannot be found or
+ * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
+ * is not probed and can be retried later.
+ */
+struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+					 unsigned int index)
+{
+	struct device *genpd_dev;
+	int num_domains;
+	int ret;
+
+	if (!dev->of_node)
+		return NULL;
+
+	/* Deal only with devices using multiple PM domains. */
+	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
+						 "#power-domain-cells");
+	if (num_domains < 2 || index >= num_domains)
+		return NULL;
+
+	/* Allocate and register device on the genpd bus. */
+	genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
+	if (!genpd_dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
+	genpd_dev->bus = &genpd_bus_type;
+	genpd_dev->release = genpd_release_dev;
+
+	ret = device_register(genpd_dev);
+	if (ret) {
+		kfree(genpd_dev);
+		return ERR_PTR(ret);
+	}
+
+	/* Try to attach the device to the PM domain at the specified index. */
+	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
+	if (ret < 1) {
+		device_unregister(genpd_dev);
+		return ret ? ERR_PTR(ret) : NULL;
+	}
+
+	pm_runtime_enable(genpd_dev);
+	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
+
+	return genpd_dev;
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
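+
+/*
+ * Illustrative sketch (hypothetical driver code, not part of this file): a
+ * driver whose device lists several entries in its "power-domains" property
+ * could attach to the second one and tie its runtime PM to the original
+ * device with a device link:
+ *
+ *	struct device *pd_dev;
+ *
+ *	pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
+ *	if (IS_ERR(pd_dev))
+ *		return PTR_ERR(pd_dev);	// may be -EPROBE_DEFER
+ *	if (pd_dev)
+ *		device_link_add(dev, pd_dev,
+ *				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ */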
+
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+{
+	int index;
+
+	if (!dev->of_node)
+		return NULL;
+
+	index = of_property_match_string(dev->of_node, "power-domain-names",
+					 name);
+	if (index < 0)
+		return NULL;
+
+	return genpd_dev_pm_attach_by_id(dev, index);
+}
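+
+/*
+ * Illustrative sketch (hypothetical bindings, not part of this file): given a
+ * device node such as
+ *
+ *	power-domains = <&pd_mx>, <&pd_sram>;
+ *	power-domain-names = "mx", "sram";
+ *
+ * a driver could attach to the "sram" domain with
+ *
+ *	pd_dev = genpd_dev_pm_attach_by_name(dev, "sram");
+ */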
+
+static const struct of_device_id idle_state_match[] = {
+	{ .compatible = "domain-idle-state", },
+	{ }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+				    struct device_node *state_node)
+{
+	int err;
+	u32 residency;
+	u32 entry_latency, exit_latency;
+
+	err = of_property_read_u32(state_node, "entry-latency-us",
+						&entry_latency);
+	if (err) {
+		pr_debug(" * %pOF missing entry-latency-us property\n",
+						state_node);
+		return -EINVAL;
+	}
+
+	err = of_property_read_u32(state_node, "exit-latency-us",
+						&exit_latency);
+	if (err) {
+		pr_debug(" * %pOF missing exit-latency-us property\n",
+						state_node);
+		return -EINVAL;
+	}
+
+	err = of_property_read_u32(state_node, "min-residency-us", &residency);
+	if (!err)
+		genpd_state->residency_ns = 1000 * residency;
+
+	genpd_state->power_on_latency_ns = 1000 * exit_latency;
+	genpd_state->power_off_latency_ns = 1000 * entry_latency;
+	genpd_state->fwnode = &state_node->fwnode;
+
+	return 0;
+}
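+
+/*
+ * Illustrative sketch (hypothetical bindings, not part of this file): the
+ * properties parsed above correspond to an idle state node such as
+ *
+ *	DOMAIN_RET: domain-retention {
+ *		compatible = "domain-idle-state";
+ *		entry-latency-us = <20>;
+ *		exit-latency-us = <40>;
+ *		min-residency-us = <80>;
+ *	};
+ *
+ * referenced from the PM domain node via domain-idle-states = <&DOMAIN_RET>;.
+ */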
+
+static int genpd_iterate_idle_states(struct device_node *dn,
+				     struct genpd_power_state *states)
+{
+	int ret;
+	struct of_phandle_iterator it;
+	struct device_node *np;
+	int i = 0;
+
+	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+	if (ret <= 0)
+		return ret;
+
+	/* Loop over the phandles until all the requested entries are found */
+	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
+		np = it.node;
+		if (!of_match_node(idle_state_match, np))
+			continue;
+		if (states) {
+			ret = genpd_parse_state(&states[i], np);
+			if (ret) {
+				pr_err("Parsing idle state node %pOF failed with err %d\n",
+				       np, ret);
+				of_node_put(np);
+				return ret;
+			}
+		}
+		i++;
+	}
+
+	return i;
+}
+
+/**
+ * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the device states parsed from the OF node. The memory for the states
+ * is allocated by this function and it is the responsibility of the caller to
+ * free it after use. If no domain idle states are found, it returns -EINVAL;
+ * in case of other errors, a negative error code.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+			struct genpd_power_state **states, int *n)
+{
+	struct genpd_power_state *st;
+	int ret;
+
+	ret = genpd_iterate_idle_states(dn, NULL);
+	if (ret <= 0)
+		return ret < 0 ? ret : -EINVAL;
+
+	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return -ENOMEM;
+
+	ret = genpd_iterate_idle_states(dn, st);
+	if (ret <= 0) {
+		kfree(st);
+		return ret < 0 ? ret : -EINVAL;
+	}
+
+	*states = st;
+	*n = ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
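+
+/*
+ * Illustrative sketch (hypothetical provider code, not part of this file): a
+ * PM domain provider typically parses the states once and hands them to the
+ * genpd core before registering the domain:
+ *
+ *	struct genpd_power_state *states;
+ *	int state_count;
+ *
+ *	ret = of_genpd_parse_idle_states(np, &states, &state_count);
+ *	if (ret < 0)
+ *		return ret;
+ *	genpd->states = states;
+ *	genpd->state_count = state_count;
+ */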
+
+/**
+ * of_genpd_opp_to_performance_state - Gets performance state of device's
+ * power domain corresponding to a DT node's "required-opps" property.
+ *
+ * @dev: Device for which the performance-state needs to be found.
+ * @np: DT node where the "required-opps" property is present. This can be
+ *	the device node itself (if it doesn't have an OPP table) or a node
+ *	within the OPP table of a device (if device has an OPP table).
+ *
+ * Returns the performance state corresponding to the "required-opps" property
+ * of a DT node. This calls the platform specific genpd->opp_to_performance_state()
+ * callback to translate a power domain OPP to a performance state.
+ *
+ * Returns the performance state on success and 0 on failure.
+ */
+unsigned int of_genpd_opp_to_performance_state(struct device *dev,
+					       struct device_node *np)
+{
+	struct generic_pm_domain *genpd;
+	struct dev_pm_opp *opp;
+	int state = 0;
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return 0;
+
+	if (unlikely(!genpd->set_performance_state))
+		return 0;
+
+	genpd_lock(genpd);
+
+	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "Failed to find required OPP: %ld\n",
+			PTR_ERR(opp));
+		goto unlock;
+	}
+
+	state = genpd->opp_to_performance_state(genpd, opp);
+	dev_pm_opp_put(opp);
+
+unlock:
+	genpd_unlock(genpd);
+
+	return state;
+}
+EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
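+
+/*
+ * Illustrative sketch (hypothetical bindings, not part of this file): the
+ * "required-opps" property consumed here usually links a device OPP to an
+ * OPP of its power domain:
+ *
+ *	opp-800000000 {
+ *		opp-hz = /bits/ 64 <800000000>;
+ *		required-opps = <&domain_opp_2>;
+ *	};
+ *
+ * Passing that OPP node as @np yields whatever performance state the
+ * provider's ->opp_to_performance_state() computes for &domain_opp_2.
+ */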
+
+static int __init genpd_bus_init(void)
+{
+	return bus_register(&genpd_bus_type);
+}
+core_initcall(genpd_bus_init);
+
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+
+/***        debugfs support        ***/
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+static struct dentry *genpd_debugfs_dir;
+
+/*
+ * TODO: This function is a slightly modified version of rtpm_status_show
+ * from sysfs.c, so generalize it.
+ */
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+	static const char * const status_lookup[] = {
+		[RPM_ACTIVE] = "active",
+		[RPM_RESUMING] = "resuming",
+		[RPM_SUSPENDED] = "suspended",
+		[RPM_SUSPENDING] = "suspending"
+	};
+	const char *p = "";
+
+	if (dev->power.runtime_error)
+		p = "error";
+	else if (dev->power.disable_depth)
+		p = "unsupported";
+	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
+		p = status_lookup[dev->power.runtime_status];
+	else
+		WARN_ON(1);
+
+	seq_puts(s, p);
+}
+
+static int genpd_summary_one(struct seq_file *s,
+			struct generic_pm_domain *genpd)
+{
+	static const char * const status_lookup[] = {
+		[GPD_STATE_ACTIVE] = "on",
+		[GPD_STATE_POWER_OFF] = "off"
+	};
+	struct pm_domain_data *pm_data;
+	const char *kobj_path;
+	struct gpd_link *link;
+	char state[16];
+	int ret;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
+		goto exit;
+	if (!genpd_status_on(genpd))
+		snprintf(state, sizeof(state), "%s-%u",
+			 status_lookup[genpd->status], genpd->state_idx);
+	else
+		snprintf(state, sizeof(state), "%s",
+			 status_lookup[genpd->status]);
+	seq_printf(s, "%-30s  %-15s ", genpd->name, state);
+
+	/*
+	 * Modifications on the list require holding locks on both
+	 * master and slave, so we are safe.
+	 * Also genpd->name is immutable.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		seq_printf(s, "%s", link->slave->name);
+		if (!list_is_last(&link->master_node, &genpd->master_links))
+			seq_puts(s, ", ");
+	}
+
+	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+		kobj_path = kobject_get_path(&pm_data->dev->kobj,
+				genpd_is_irq_safe(genpd) ?
+				GFP_ATOMIC : GFP_KERNEL);
+		if (kobj_path == NULL)
+			continue;
+
+		seq_printf(s, "\n    %-50s  ", kobj_path);
+		rtpm_status_str(s, pm_data->dev);
+		kfree(kobj_path);
+	}
+
+	seq_puts(s, "\n");
+exit:
+	genpd_unlock(genpd);
+
+	return 0;
+}
+
+static int genpd_summary_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd;
+	int ret = 0;
+
+	seq_puts(s, "domain                          status          slaves\n");
+	seq_puts(s, "    /device                                             runtime status\n");
+	seq_puts(s, "----------------------------------------------------------------------\n");
+
+	ret = mutex_lock_interruptible(&gpd_list_lock);
+	if (ret)
+		return -ERESTARTSYS;
+
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+		ret = genpd_summary_one(s, genpd);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
+}
+
+static int genpd_status_show(struct seq_file *s, void *data)
+{
+	static const char * const status_lookup[] = {
+		[GPD_STATE_ACTIVE] = "on",
+		[GPD_STATE_POWER_OFF] = "off"
+	};
+
+	struct generic_pm_domain *genpd = s->private;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+		goto exit;
+
+	if (genpd->status == GPD_STATE_POWER_OFF)
+		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+			genpd->state_idx);
+	else
+		seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_sub_domains_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+	struct gpd_link *link;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	list_for_each_entry(link, &genpd->master_links, master_node)
+		seq_printf(s, "%s\n", link->slave->name);
+
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_idle_states_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+	unsigned int i;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	seq_puts(s, "State          Time Spent(ms)\n");
+
+	for (i = 0; i < genpd->state_count; i++) {
+		ktime_t delta = 0;
+		s64 msecs;
+
+		if ((genpd->status == GPD_STATE_POWER_OFF) &&
+				(genpd->state_idx == i))
+			delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+		msecs = ktime_to_ms(
+			ktime_add(genpd->states[i].idle_time, delta));
+		seq_printf(s, "S%-13i %lld\n", i, msecs);
+	}
+
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_active_time_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+	ktime_t delta = 0;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (genpd->status == GPD_STATE_ACTIVE)
+		delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+	seq_printf(s, "%lld ms\n", ktime_to_ms(
+				ktime_add(genpd->on_time, delta)));
+
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+	ktime_t delta = 0, total = 0;
+	unsigned int i;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	for (i = 0; i < genpd->state_count; i++) {
+
+		if ((genpd->status == GPD_STATE_POWER_OFF) &&
+				(genpd->state_idx == i))
+			delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+		total = ktime_add(total, genpd->states[i].idle_time);
+	}
+	total = ktime_add(total, delta);
+
+	seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_devices_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+	struct pm_domain_data *pm_data;
+	const char *kobj_path;
+	int ret = 0;
+
+	ret = genpd_lock_interruptible(genpd);
+	if (ret)
+		return -ERESTARTSYS;
+
+	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+		kobj_path = kobject_get_path(&pm_data->dev->kobj,
+				genpd_is_irq_safe(genpd) ?
+				GFP_ATOMIC : GFP_KERNEL);
+		if (kobj_path == NULL)
+			continue;
+
+		seq_printf(s, "%s\n", kobj_path);
+		kfree(kobj_path);
+	}
+
+	genpd_unlock(genpd);
+	return ret;
+}
+
+static int genpd_perf_state_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+
+	if (genpd_lock_interruptible(genpd))
+		return -ERESTARTSYS;
+
+	seq_printf(s, "%u\n", genpd->performance_state);
+
+	genpd_unlock(genpd);
+	return 0;
+}
+
+#define define_genpd_open_function(name) \
+static int genpd_##name##_open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, genpd_##name##_show, inode->i_private); \
+}
+
+define_genpd_open_function(summary);
+define_genpd_open_function(status);
+define_genpd_open_function(sub_domains);
+define_genpd_open_function(idle_states);
+define_genpd_open_function(active_time);
+define_genpd_open_function(total_idle_time);
+define_genpd_open_function(devices);
+define_genpd_open_function(perf_state);
+
+#define define_genpd_debugfs_fops(name) \
+static const struct file_operations genpd_##name##_fops = { \
+	.open = genpd_##name##_open, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+define_genpd_debugfs_fops(summary);
+define_genpd_debugfs_fops(status);
+define_genpd_debugfs_fops(sub_domains);
+define_genpd_debugfs_fops(idle_states);
+define_genpd_debugfs_fops(active_time);
+define_genpd_debugfs_fops(total_idle_time);
+define_genpd_debugfs_fops(devices);
+define_genpd_debugfs_fops(perf_state);
+
+static int __init genpd_debug_init(void)
+{
+	struct dentry *d;
+	struct generic_pm_domain *genpd;
+
+	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+
+	if (!genpd_debugfs_dir)
+		return -ENOMEM;
+
+	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
+			genpd_debugfs_dir, NULL, &genpd_summary_fops);
+	if (!d)
+		return -ENOMEM;
+
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
+		if (!d)
+			return -ENOMEM;
+
+		debugfs_create_file("current_state", 0444,
+				d, genpd, &genpd_status_fops);
+		debugfs_create_file("sub_domains", 0444,
+				d, genpd, &genpd_sub_domains_fops);
+		debugfs_create_file("idle_states", 0444,
+				d, genpd, &genpd_idle_states_fops);
+		debugfs_create_file("active_time", 0444,
+				d, genpd, &genpd_active_time_fops);
+		debugfs_create_file("total_idle_time", 0444,
+				d, genpd, &genpd_total_idle_time_fops);
+		debugfs_create_file("devices", 0444,
+				d, genpd, &genpd_devices_fops);
+		if (genpd->set_performance_state)
+			debugfs_create_file("perf_state", 0444,
+					    d, genpd, &genpd_perf_state_fops);
+	}
+
+	return 0;
+}
+late_initcall(genpd_debug_init);
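+
+/*
+ * With debugfs mounted at /sys/kernel/debug, the code above produces:
+ *
+ *	pm_genpd/pm_genpd_summary	- table of all domains and their devices
+ *	pm_genpd/<domain>/current_state
+ *	pm_genpd/<domain>/sub_domains
+ *	pm_genpd/<domain>/idle_states
+ *	pm_genpd/<domain>/active_time
+ *	pm_genpd/<domain>/total_idle_time
+ *	pm_genpd/<domain>/devices
+ *	pm_genpd/<domain>/perf_state	- only for domains that support
+ *					  performance states
+ */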
+
+static void __exit genpd_debug_exit(void)
+{
+	debugfs_remove_recursive(genpd_debugfs_dir);
+}
+__exitcall(genpd_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 0000000..99896fb
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,259 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+	s64 *constraint_ns_p = data;
+	s64 constraint_ns;
+
+	if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+		/*
+		 * Only take suspend-time QoS constraints of devices into
+		 * account, because constraints updated after the device has
+		 * been suspended are not guaranteed to be taken into account
+		 * anyway.  In order for them to take effect, the device has to
+		 * be resumed and suspended again.
+		 */
+		constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+	} else {
+		/*
+		 * The child is not in a domain and there's no info on its
+		 * suspend/resume latencies, so assume them to be negligible and
+		 * take its current PM QoS constraint (that's the only thing
+		 * known at this point anyway).
+		 */
+		constraint_ns = dev_pm_qos_read_value(dev);
+		constraint_ns *= NSEC_PER_USEC;
+	}
+
+	if (constraint_ns < *constraint_ns_p)
+		*constraint_ns_p = constraint_ns;
+
+	return 0;
+}
+
+/**
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
+ * @dev: Device to check.
+ */
+static bool default_suspend_ok(struct device *dev)
+{
+	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	unsigned long flags;
+	s64 constraint_ns;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!td->constraint_changed) {
+		bool ret = td->cached_suspend_ok;
+
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+		return ret;
+	}
+	td->constraint_changed = false;
+	td->cached_suspend_ok = false;
+	td->effective_constraint_ns = 0;
+	constraint_ns = __dev_pm_qos_read_value(dev);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (constraint_ns == 0)
+		return false;
+
+	constraint_ns *= NSEC_PER_USEC;
+	/*
+	 * We can walk the children without any additional locking, because
+	 * they all have been suspended at this point and their
+	 * effective_constraint_ns fields won't be modified in parallel with us.
+	 */
+	if (!dev->power.ignore_children)
+		device_for_each_child(dev, &constraint_ns,
+				      dev_update_qos_constraint);
+
+	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+		/* "No restriction", so the device is allowed to suspend. */
+		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+		td->cached_suspend_ok = true;
+	} else if (constraint_ns == 0) {
+		/*
+		 * This triggers if one of the children that don't belong to a
+		 * domain has a zero PM QoS constraint and it's better not to
+		 * suspend in that case.  effective_constraint_ns is zero
+		 * already and cached_suspend_ok is false, so bail out.
+		 */
+		return false;
+	} else {
+		constraint_ns -= td->suspend_latency_ns +
+				td->resume_latency_ns;
+		/*
+		 * effective_constraint_ns is zero already and cached_suspend_ok
+		 * is false, so if the computed value is not positive, return
+		 * right away.
+		 */
+		if (constraint_ns <= 0)
+			return false;
+
+		td->effective_constraint_ns = constraint_ns;
+		td->cached_suspend_ok = true;
+	}
+
+	/*
+	 * The children have been suspended already, so we don't need to take
+	 * their suspend latencies into account here.
+	 */
+	return td->cached_suspend_ok;
+}
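+
+/*
+ * Worked example (illustrative numbers): with an effective QoS resume-latency
+ * constraint of 2000 us (2000000 ns) and td->suspend_latency_ns +
+ * td->resume_latency_ns = 1500000 ns, the computation above leaves
+ * td->effective_constraint_ns = 500000 ns and cached_suspend_ok = true, i.e.
+ * the device may suspend with 0.5 ms of latency budget left for its parents
+ * and its domain.
+ */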
+
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+				     unsigned int state)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct gpd_link *link;
+	struct pm_domain_data *pdd;
+	s64 min_off_time_ns;
+	s64 off_on_time_ns;
+
+	off_on_time_ns = genpd->states[state].power_off_latency_ns +
+		genpd->states[state].power_on_latency_ns;
+
+	min_off_time_ns = -1;
+	/*
+	 * Check if subdomains can be off for enough time.
+	 *
+	 * All subdomains have been powered off already at this point.
+	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		struct generic_pm_domain *sd = link->slave;
+		s64 sd_max_off_ns = sd->max_off_time_ns;
+
+		if (sd_max_off_ns < 0)
+			continue;
+
+		/*
+		 * Check if the subdomain is allowed to be off long enough for
+		 * the current domain to turn off and on (that's how much time
+		 * it will have to wait worst case).
+		 */
+		if (sd_max_off_ns <= off_on_time_ns)
+			return false;
+
+		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+			min_off_time_ns = sd_max_off_ns;
+	}
+
+	/*
+	 * Check if the devices in the domain can be off long enough.
+	 */
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		struct gpd_timing_data *td;
+		s64 constraint_ns;
+
+		/*
+		 * Check if the device is allowed to be off long enough for the
+		 * domain to turn off and on (that's how much time it will
+		 * have to wait worst case).
+		 */
+		td = &to_gpd_data(pdd)->td;
+		constraint_ns = td->effective_constraint_ns;
+		/*
+		 * Zero means "no suspend at all" and this runs only when all
+		 * devices in the domain are suspended, so it must be positive.
+		 */
+		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+			continue;
+
+		if (constraint_ns <= off_on_time_ns)
+			return false;
+
+		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+			min_off_time_ns = constraint_ns;
+	}
+
+	/*
+	 * If the computed minimum device off time is negative, there are no
+	 * latency constraints, so the domain can spend arbitrary time in the
+	 * "off" state.
+	 */
+	if (min_off_time_ns < 0)
+		return true;
+
+	/*
+	 * The difference between the computed minimum subdomain or device off
+	 * time and the time needed to turn the domain on is the maximum
+	 * theoretical time this domain can spend in the "off" state.
+	 */
+	genpd->max_off_time_ns = min_off_time_ns -
+		genpd->states[state].power_on_latency_ns;
+	return true;
+}
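+
+/*
+ * Worked example (illustrative numbers): for a state with
+ * power_off_latency_ns + power_on_latency_ns = 3000000 ns and a tightest
+ * device constraint of 5000000 ns, the state is acceptable and
+ * max_off_time_ns becomes 5000000 ns minus the state's power_on_latency_ns;
+ * a tightest constraint of 2000000 ns would instead reject this state and
+ * make the caller try a shallower one.
+ */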
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct gpd_link *link;
+
+	if (!genpd->max_off_time_changed)
+		return genpd->cached_power_down_ok;
+
+	/*
+	 * We have to invalidate the cached results for the masters, so
+	 * use the observation that default_power_down_ok() is not
+	 * going to be called for any master until this instance
+	 * returns.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node)
+		link->master->max_off_time_changed = true;
+
+	genpd->max_off_time_ns = -1;
+	genpd->max_off_time_changed = false;
+	genpd->cached_power_down_ok = true;
+	genpd->state_idx = genpd->state_count - 1;
+
+	/* Find a state to power down to, starting from the deepest. */
+	while (!__default_power_down_ok(pd, genpd->state_idx)) {
+		if (genpd->state_idx == 0) {
+			genpd->cached_power_down_ok = false;
+			break;
+		}
+		genpd->state_idx--;
+	}
+
+	return genpd->cached_power_down_ok;
+}
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+	return false;
+}
+
+struct dev_power_governor simple_qos_governor = {
+	.suspend_ok = default_suspend_ok,
+	.power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+	.power_down_ok = always_on_power_down_ok,
+	.suspend_ok = default_suspend_ok,
+};
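+
+/*
+ * Illustrative sketch (hypothetical provider code, not part of this file): a
+ * PM domain provider picks one of these governors when initializing its
+ * domain:
+ *
+ *	pm_genpd_init(&my_domain, &simple_qos_governor, false);
+ *
+ * or passes &pm_domain_always_on_gov for a domain that must never power off.
+ */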
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 0000000..b2ed606
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,300 @@
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+
+#ifdef CONFIG_PM
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code.  Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	int ret;
+
+	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code.  Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	int ret;
+
+	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM */
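+
+/*
+ * Illustrative sketch (hypothetical subsystem code, not part of this file): a
+ * bus type with no runtime PM work of its own can forward straight to the
+ * driver by plugging these helpers into its dev_pm_ops:
+ *
+ *	static const struct dev_pm_ops foo_bus_pm_ops = {
+ *		SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+ *				   pm_generic_runtime_resume, NULL)
+ *	};
+ */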
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->resume ? pm->resume(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	return pm && pm->restore ? pm->restore(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
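+
+/*
+ * Illustrative sketch (hypothetical subsystem code, not part of this file):
+ * the sleep helpers above combine the same way; SET_SYSTEM_SLEEP_PM_OPS()
+ * wires one suspend/resume pair into the suspend, freeze, poweroff and
+ * restore slots:
+ *
+ *	static const struct dev_pm_ops foo_bus_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+ *	};
+ */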
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
new file mode 100644
index 0000000..a690fd4
--- /dev/null
+++ b/drivers/base/power/main.c
@@ -0,0 +1,2124 @@
+/*
+ * drivers/base/power/main.c - Where the driver meets power management.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * The driver model core calls device_pm_add() when a device is registered.
+ * This will initialize the embedded device_pm_info object in the device
+ * and add it to the list of power-controlled devices. sysfs entries for
+ * controlling device power management will also be added.
+ *
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm-trace.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/async.h>
+#include <linux/suspend.h>
+#include <trace/events/power.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
+/*
+ * The entries in the dpm_list list are in a depth first order, simply
+ * because children are guaranteed to be discovered after parents, and
+ * are inserted at the back of the list on discovery.
+ *
+ * Since device_pm_add() may be called with a device lock held,
+ * we must never try to acquire a device lock while holding
+ * dpm_list_mutex.
+ */
+
+LIST_HEAD(dpm_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
+
+struct suspend_stats suspend_stats;
+static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
+
+static int async_error;
+
+static const char *pm_verb(int event)
+{
+	switch (event) {
+	case PM_EVENT_SUSPEND:
+		return "suspend";
+	case PM_EVENT_RESUME:
+		return "resume";
+	case PM_EVENT_FREEZE:
+		return "freeze";
+	case PM_EVENT_QUIESCE:
+		return "quiesce";
+	case PM_EVENT_HIBERNATE:
+		return "hibernate";
+	case PM_EVENT_THAW:
+		return "thaw";
+	case PM_EVENT_RESTORE:
+		return "restore";
+	case PM_EVENT_RECOVER:
+		return "recover";
+	default:
+		return "(unknown PM event)";
+	}
+}
+
+/**
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
+ * @dev: Device object being initialized.
+ */
+void device_pm_sleep_init(struct device *dev)
+{
+	dev->power.is_prepared = false;
+	dev->power.is_suspended = false;
+	dev->power.is_noirq_suspended = false;
+	dev->power.is_late_suspended = false;
+	init_completion(&dev->power.completion);
+	complete_all(&dev->power.completion);
+	dev->power.wakeup = NULL;
+	INIT_LIST_HEAD(&dev->power.entry);
+}
+
+/**
+ * device_pm_lock - Lock the list of active devices used by the PM core.
+ */
+void device_pm_lock(void)
+{
+	mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_unlock - Unlock the list of active devices used by the PM core.
+ */
+void device_pm_unlock(void)
+{
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_add - Add a device to the PM core's list of active devices.
+ * @dev: Device to add to the list.
+ */
+void device_pm_add(struct device *dev)
+{
+	pr_debug("PM: Adding info for %s:%s\n",
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+	device_pm_check_callbacks(dev);
+	mutex_lock(&dpm_list_mtx);
+	if (dev->parent && dev->parent->power.is_prepared)
+		dev_warn(dev, "parent %s should not be sleeping\n",
+			dev_name(dev->parent));
+	list_add_tail(&dev->power.entry, &dpm_list);
+	dev->power.in_dpm_list = true;
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_remove - Remove a device from the PM core's list of active devices.
+ * @dev: Device to be removed from the list.
+ */
+void device_pm_remove(struct device *dev)
+{
+	pr_debug("PM: Removing info for %s:%s\n",
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+	complete_all(&dev->power.completion);
+	mutex_lock(&dpm_list_mtx);
+	list_del_init(&dev->power.entry);
+	dev->power.in_dpm_list = false;
+	mutex_unlock(&dpm_list_mtx);
+	device_wakeup_disable(dev);
+	pm_runtime_remove(dev);
+	device_pm_check_callbacks(dev);
+}
+
+/**
+ * device_pm_move_before - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come before.
+ */
+void device_pm_move_before(struct device *deva, struct device *devb)
+{
+	pr_debug("PM: Moving %s:%s before %s:%s\n",
+		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+	/* Delete deva from dpm_list and reinsert before devb. */
+	list_move_tail(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_after - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come after.
+ */
+void device_pm_move_after(struct device *deva, struct device *devb)
+{
+	pr_debug("PM: Moving %s:%s after %s:%s\n",
+		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+	/* Delete deva from dpm_list and reinsert after devb. */
+	list_move(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_last - Move device to end of the PM core's list of devices.
+ * @dev: Device to move in dpm_list.
+ */
+void device_pm_move_last(struct device *dev)
+{
+	pr_debug("PM: Moving %s:%s to end of list\n",
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+	list_move_tail(&dev->power.entry, &dpm_list);
+}
+
+static ktime_t initcall_debug_start(struct device *dev, void *cb)
+{
+	if (!pm_print_times_enabled)
+		return 0;
+
+	dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+		 task_pid_nr(current),
+		 dev->parent ? dev_name(dev->parent) : "none");
+	return ktime_get();
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+				  void *cb, int error)
+{
+	ktime_t rettime;
+	s64 nsecs;
+
+	if (!pm_print_times_enabled)
+		return;
+
+	rettime = ktime_get();
+	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
+
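+	/* ">> 10" approximates a ns -> us division (by 1024 instead of 1000). */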
+	dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+		 (unsigned long long)nsecs >> 10);
+}
+
+/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+	if (!dev)
+		return;
+
+	if (async || (pm_async_enabled && dev->power.async_suspend))
+		wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+	dpm_wait(dev, *((bool *)async_ptr));
+	return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+	device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	/*
+	 * If the supplier goes away right after we've checked the link to it,
+	 * we'll wait for its completion to change the state, but that's fine,
+	 * because the only things that will block as a result are the SRCU
+	 * callbacks freeing the link objects for the links in the list we're
+	 * walking.
+	 */
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_wait(link->supplier, async);
+
+	device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+	dpm_wait(dev->parent, async);
+	dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	/*
+	 * The status of a device link can only be changed from "dormant" by a
+	 * probe, but that cannot happen during system suspend/resume.  In
+	 * theory it can change to "dormant" at that time, but then it is
+	 * reasonable to wait for the target device anyway (e.g. if it goes
+	 * away, it's better to wait for it to go away completely and then
+	 * continue instead of trying to continue in parallel with its
+	 * unregistration).
+	 */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_wait(link->consumer, async);
+
+	device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+	dpm_wait_for_children(dev, async);
+	dpm_wait_for_consumers(dev, async);
+}
+
+/**
+ * pm_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		return ops->suspend;
+	case PM_EVENT_RESUME:
+		return ops->resume;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return ops->freeze;
+	case PM_EVENT_HIBERNATE:
+		return ops->poweroff;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		return ops->thaw;
+	case PM_EVENT_RESTORE:
+		return ops->restore;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+	}
+
+	return NULL;
+}
+
+/**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for the device while the returned callback runs.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+				      pm_message_t state)
+{
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		return ops->suspend_late;
+	case PM_EVENT_RESUME:
+		return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return ops->freeze_late;
+	case PM_EVENT_HIBERNATE:
+		return ops->poweroff_late;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		return ops->thaw_early;
+	case PM_EVENT_RESTORE:
+		return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+	}
+
+	return NULL;
+}
+
+/**
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * The driver of the device will not receive interrupts while the returned
+ * callback is being executed.
+ */
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+	switch (state.event) {
+#ifdef CONFIG_SUSPEND
+	case PM_EVENT_SUSPEND:
+		return ops->suspend_noirq;
+	case PM_EVENT_RESUME:
+		return ops->resume_noirq;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return ops->freeze_noirq;
+	case PM_EVENT_HIBERNATE:
+		return ops->poweroff_noirq;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RECOVER:
+		return ops->thaw_noirq;
+	case PM_EVENT_RESTORE:
+		return ops->restore_noirq;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+	}
+
+	return NULL;
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
+{
+	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+		", may wakeup" : "");
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
+			int error)
+{
+	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
+		dev_name(dev), pm_verb(state.event), info, error);
+}
+
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
+			  const char *info)
+{
+	ktime_t calltime;
+	u64 usecs64;
+	int usecs;
+
+	calltime = ktime_get();
+	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs = usecs64;
+	if (usecs == 0)
+		usecs = 1;
+
+	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+		  info ?: "", info ? " " : "", pm_verb(state.event),
+		  error ? "aborted" : "complete",
+		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+			    pm_message_t state, const char *info)
+{
+	ktime_t calltime;
+	int error;
+
+	if (!cb)
+		return 0;
+
+	calltime = initcall_debug_start(dev, cb);
+
+	pm_dev_dbg(dev, state, info);
+	trace_device_pm_callback_start(dev, info, state.event);
+	error = cb(dev);
+	trace_device_pm_callback_end(dev, error);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, cb, error);
+
+	return error;
+}
+
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+	struct device		*dev;
+	struct task_struct	*tsk;
+	struct timer_list	timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+	struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(struct timer_list *t)
+{
+	struct dpm_watchdog *wd = from_timer(wd, t, timer);
+
+	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+	show_stack(wd->tsk, NULL);
+	panic("%s %s: unrecoverable failure\n",
+		dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+	struct timer_list *timer = &wd->timer;
+
+	wd->dev = dev;
+	wd->tsk = current;
+
+	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
+	/* use same timeout value for both suspend and resume */
+	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+	add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+	struct timer_list *timer = &wd->timer;
+
+	del_timer_sync(timer);
+	destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
+/*------------------------- Resume routines -------------------------*/
+
+/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+	dev->power.is_late_suspended = false;
+	dev->power.is_suspended = false;
+}
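+
+/*
+ * Illustrative sketch (hypothetical middle-layer code, not part of this
+ * file): a bus type that finds during its noirq resume callback that a
+ * device should stay suspended could do
+ *
+ *	if (foo_dev_should_stay_suspended(dev)) {
+ *		dev_pm_skip_next_resume_phases(dev);
+ *		return 0;
+ *	}
+ *	return pm_generic_resume_noirq(dev);
+ *
+ * where foo_dev_should_stay_suspended() is a made-up check.
+ */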
+
+/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+	switch (resume_msg.event) {
+	case PM_EVENT_RESUME:
+		return PMSG_SUSPEND;
+	case PM_EVENT_THAW:
+	case PM_EVENT_RESTORE:
+		return PMSG_FREEZE;
+	case PM_EVENT_RECOVER:
+		return PMSG_HIBERNATE;
+	}
+	return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+						pm_message_t state,
+						const char **info_p)
+{
+	pm_callback_t callback;
+	const char *info;
+
+	if (dev->pm_domain) {
+		info = "noirq power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "noirq type ";
+		callback = pm_noirq_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "noirq class ";
+		callback = pm_noirq_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "noirq bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
+	} else {
+		return NULL;
+	}
+
+	if (info_p)
+		*info_p = info;
+
+	return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+						 pm_message_t state,
+						 const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+						pm_message_t state,
+						const char **info_p);
+
+/**
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback;
+	const char *info;
+	bool skip_resume;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
+
+	if (dev->power.syscore || dev->power.direct_complete)
+		goto Out;
+
+	if (!dev->power.is_noirq_suspended)
+		goto Out;
+
+	dpm_wait_for_superior(dev, async);
+
+	skip_resume = dev_pm_may_skip_resume(dev);
+
+	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+	if (callback)
+		goto Run;
+
+	if (skip_resume)
+		goto Skip;
+
+	if (dev_pm_smart_suspend_and_suspended(dev)) {
+		pm_message_t suspend_msg = suspend_event(state);
+
+		/*
+		 * If "freeze" callbacks have been skipped during a transition
+		 * related to hibernation, the subsequent "thaw" callbacks must
+		 * be skipped too or bad things may happen.  Otherwise, resume
+		 * callbacks are going to be run for the device, so its runtime
+		 * PM status must be changed to reflect the new state after the
+		 * transition under way.
+		 */
+		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+			if (state.event == PM_EVENT_THAW) {
+				skip_resume = true;
+				goto Skip;
+			} else {
+				pm_runtime_set_active(dev);
+			}
+		}
+	}
+
+	if (dev->driver && dev->driver->pm) {
+		info = "noirq driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+Run:
+	error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
+	dev->power.is_noirq_suspended = false;
+
+	if (skip_resume) {
+		/*
+		 * The device is going to be left in suspend, but it might not
+		 * have been in runtime suspend before the system suspended, so
+		 * its runtime PM status needs to be updated to avoid confusing
+		 * the runtime PM framework when runtime PM is enabled for the
+		 * device again.
+		 */
+		pm_runtime_set_suspended(dev);
+		dev_pm_skip_next_resume_phases(dev);
+	}
+
+Out:
+	complete_all(&dev->power.completion);
+	TRACE_RESUME(error);
+	return error;
+}
+
+static bool is_async(struct device *dev)
+{
+	return dev->power.async_suspend && pm_async_enabled
+		&& !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_noirq(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
+void dpm_noirq_resume_devices(pm_message_t state)
+{
+	struct device *dev;
+	ktime_t starttime = ktime_get();
+
+	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+
+	/*
+	 * Advance the async threads upfront, in case their starting is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_noirq, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_noirq_list)) {
+		dev = to_device(dpm_noirq_list.next);
+		get_device(dev);
+		list_move_tail(&dev->power.entry, &dpm_late_early_list);
+		mutex_unlock(&dpm_list_mtx);
+
+		if (!is_async(dev)) {
+			int error;
+
+			error = device_resume_noirq(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_noirq++;
+				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " noirq", error);
+			}
+		}
+
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	dpm_show_time(starttime, state, 0, "noirq");
+	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+void dpm_noirq_end(void)
+{
+	resume_device_irqs();
+	device_wakeup_disarm_wake_irqs();
+	cpuidle_resume();
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+	dpm_noirq_resume_devices(state);
+	dpm_noirq_end();
+}
+
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+						pm_message_t state,
+						const char **info_p)
+{
+	pm_callback_t callback;
+	const char *info;
+
+	if (dev->pm_domain) {
+		info = "early power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "early type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "early class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "early bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	} else {
+		return NULL;
+	}
+
+	if (info_p)
+		*info_p = info;
+
+	return callback;
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback;
+	const char *info;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
+
+	if (dev->power.syscore || dev->power.direct_complete)
+		goto Out;
+
+	if (!dev->power.is_late_suspended)
+		goto Out;
+
+	dpm_wait_for_superior(dev, async);
+
+	callback = dpm_subsys_resume_early_cb(dev, state, &info);
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "early driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_late_suspended = false;
+
+ Out:
+	TRACE_RESUME(error);
+
+	pm_runtime_enable(dev);
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_early(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_early(pm_message_t state)
+{
+	struct device *dev;
+	ktime_t starttime = ktime_get();
+
+	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+
+	/*
+	 * Advance the async threads upfront, in case their starting is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_early, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_late_early_list)) {
+		dev = to_device(dpm_late_early_list.next);
+		get_device(dev);
+		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		mutex_unlock(&dpm_list_mtx);
+
+		if (!is_async(dev)) {
+			int error;
+
+			error = device_resume_early(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_early++;
+				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " early", error);
+			}
+		}
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	dpm_show_time(starttime, state, 0, "early");
+	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
+}
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+	dpm_resume_noirq(state);
+	dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
+
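+/*
+ * For illustration only: a hypothetical driver ("foo"; all names invented)
+ * that participates in each of the resume phases driven above, using the
+ * standard initializers from <linux/pm.h>:
+ *
+ *	static const struct dev_pm_ops foo_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
+ *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
+ *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
+ *	};
+ *
+ * foo_resume_noirq() is then invoked from dpm_resume_noirq(),
+ * foo_resume_early() from dpm_resume_early() and foo_resume() from
+ * dpm_resume() below.
+ */
+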
+/**
+ * device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+static int device_resume(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
+	int error = 0;
+	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
+
+	if (dev->power.syscore)
+		goto Complete;
+
+	if (dev->power.direct_complete) {
+		/* Match the pm_runtime_disable() in __device_suspend(). */
+		pm_runtime_enable(dev);
+		goto Complete;
+	}
+
+	dpm_wait_for_superior(dev, async);
+	dpm_watchdog_set(&wd, dev);
+	device_lock(dev);
+
+	/*
+	 * Clearing is_prepared here is a small lie: it allows new children
+	 * to be added below a resumed device even though the device itself
+	 * hasn't completed the transition yet.
+	 */
+	dev->power.is_prepared = false;
+
+	if (!dev->power.is_suspended)
+		goto Unlock;
+
+	if (dev->pm_domain) {
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Driver;
+	}
+
+	if (dev->type && dev->type->pm) {
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Driver;
+	}
+
+	if (dev->class && dev->class->pm) {
+		info = "class ";
+		callback = pm_op(dev->class->pm, state);
+		goto Driver;
+	}
+
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
+		} else if (dev->bus->resume) {
+			info = "legacy bus ";
+			callback = dev->bus->resume;
+			goto End;
+		}
+	}
+
+ Driver:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
+ End:
+	error = dpm_run_callback(callback, dev, state, info);
+	dev->power.is_suspended = false;
+
+ Unlock:
+	device_unlock(dev);
+	dpm_watchdog_clear(&wd);
+
+ Complete:
+	complete_all(&dev->power.completion);
+
+	TRACE_RESUME(error);
+
+	return error;
+}
+
+static void async_resume(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+	put_device(dev);
+}
+
+/**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the appropriate "resume" callback for all devices whose status
+ * indicates that they are suspended.
+ */
+void dpm_resume(pm_message_t state)
+{
+	struct device *dev;
+	ktime_t starttime = ktime_get();
+
+	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
+	might_sleep();
+
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_suspended_list)) {
+		dev = to_device(dpm_suspended_list.next);
+		get_device(dev);
+		if (!is_async(dev)) {
+			int error;
+
+			mutex_unlock(&dpm_list_mtx);
+
+			error = device_resume(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume++;
+				dpm_save_failed_step(SUSPEND_RESUME);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, "", error);
+			}
+
+			mutex_lock(&dpm_list_mtx);
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &dpm_prepared_list);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	dpm_show_time(starttime, state, 0, NULL);
+
+	cpufreq_resume();
+	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
+}
+
+/**
+ * device_complete - Complete a PM transition for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ */
+static void device_complete(struct device *dev, pm_message_t state)
+{
+	void (*callback)(struct device *) = NULL;
+	const char *info = NULL;
+
+	if (dev->power.syscore)
+		return;
+
+	device_lock(dev);
+
+	if (dev->pm_domain) {
+		info = "completing power domain ";
+		callback = dev->pm_domain->ops.complete;
+	} else if (dev->type && dev->type->pm) {
+		info = "completing type ";
+		callback = dev->type->pm->complete;
+	} else if (dev->class && dev->class->pm) {
+		info = "completing class ";
+		callback = dev->class->pm->complete;
+	} else if (dev->bus && dev->bus->pm) {
+		info = "completing bus ";
+		callback = dev->bus->pm->complete;
+	}
+
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "completing driver ";
+		callback = dev->driver->pm->complete;
+	}
+
+	if (callback) {
+		pm_dev_dbg(dev, state, info);
+		callback(dev);
+	}
+
+	device_unlock(dev);
+
+	pm_runtime_put(dev);
+}
+
+/**
+ * dpm_complete - Complete a PM transition for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->complete() callbacks for all devices whose PM status is not
+ * DPM_ON (this allows new devices to be registered).
+ */
+void dpm_complete(pm_message_t state)
+{
+	struct list_head list;
+
+	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
+	might_sleep();
+
+	INIT_LIST_HEAD(&list);
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
+
+		get_device(dev);
+		dev->power.is_prepared = false;
+		list_move(&dev->power.entry, &list);
+		mutex_unlock(&dpm_list_mtx);
+
+		trace_device_pm_callback_start(dev, "", state.event);
+		device_complete(dev, state);
+		trace_device_pm_callback_end(dev, 0);
+
+		mutex_lock(&dpm_list_mtx);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
+	mutex_unlock(&dpm_list_mtx);
+
+	/* Allow device probing and trigger re-probing of deferred devices */
+	device_unblock_probing();
+	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
+}
+
+/**
+ * dpm_resume_end - Execute "resume" callbacks and complete system transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute "resume" callbacks for all devices and complete the PM transition of
+ * the system.
+ */
+void dpm_resume_end(pm_message_t state)
+{
+	dpm_resume(state);
+	dpm_complete(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_end);
+
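+/*
+ * Putting the above together, a sleep transition resumes devices with two
+ * calls (sketch; see kernel/power/suspend.c and kernel/power/hibernate.c
+ * for the authoritative call sites):
+ *
+ *	dpm_resume_start(state);	runs the "noirq" and "early" phases
+ *	...
+ *	dpm_resume_end(state);		runs "resume" and then ->complete()
+ */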
+
+/*------------------------- Suspend routines -------------------------*/
+
+/**
+ * resume_event - Return a "resume" message for given "suspend" sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ *
+ * Return a PM message representing the resume event corresponding to given
+ * sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
+{
+	switch (sleep_state.event) {
+	case PM_EVENT_SUSPEND:
+		return PMSG_RESUME;
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_QUIESCE:
+		return PMSG_RECOVER;
+	case PM_EVENT_HIBERNATE:
+		return PMSG_RESTORE;
+	}
+	return PMSG_ON;
+}
+
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	if (dev->parent)
+		dev->parent->power.must_resume = true;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		link->supplier->power.must_resume = true;
+
+	device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+						 pm_message_t state,
+						 const char **info_p)
+{
+	pm_callback_t callback;
+	const char *info;
+
+	if (dev->pm_domain) {
+		info = "noirq power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "noirq type ";
+		callback = pm_noirq_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "noirq class ";
+		callback = pm_noirq_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "noirq bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
+	} else {
+		return NULL;
+	}
+
+	if (info_p)
+		*info_p = info;
+
+	return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+			       bool no_subsys_suspend_noirq)
+{
+	pm_message_t resume_msg = resume_event(state);
+
+	/*
+	 * If all of the device driver's "noirq", "late" and "early" callbacks
+	 * are invoked directly by the core, the decision to allow the device to
+	 * stay in suspend can be based on its current runtime PM status and its
+	 * wakeup settings.
+	 */
+	if (no_subsys_suspend_noirq &&
+	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+		return !pm_runtime_status_suspended(dev) &&
+			(resume_msg.event != PM_EVENT_RESUME ||
+			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+	/*
+	 * The only safe strategy here is to require that if the device may not
+	 * be left in suspend, resume callbacks must be invoked for it.
+	 */
+	return !dev->power.may_skip_resume;
+}
+
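+/*
+ * Sketch of how a driver opts in to the evaluation above (hypothetical call
+ * site, e.g. at probe time; the flags and the dev_pm_set_driver_flags()
+ * helper are part of the driver flags API):
+ *
+ *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ *				     DPM_FLAG_LEAVE_SUSPENDED);
+ */
+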
+/**
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback;
+	const char *info;
+	bool no_subsys_cb = false;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_SUSPEND(0);
+
+	dpm_wait_for_subordinate(dev, async);
+
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
+	if (dev->power.syscore || dev->power.direct_complete)
+		goto Complete;
+
+	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+	if (callback)
+		goto Run;
+
+	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+		goto Skip;
+
+	if (dev->driver && dev->driver->pm) {
+		info = "noirq driver ";
+		callback = pm_noirq_op(dev->driver->pm, state);
+	}
+
+Run:
+	error = dpm_run_callback(callback, dev, state, info);
+	if (error) {
+		async_error = error;
+		goto Complete;
+	}
+
+Skip:
+	dev->power.is_noirq_suspended = true;
+
+	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+		dev->power.must_resume = dev->power.must_resume ||
+				atomic_read(&dev->power.usage_count) > 1 ||
+				device_must_resume(dev, state, no_subsys_cb);
+	} else {
+		dev->power.must_resume = true;
+	}
+
+	if (dev->power.must_resume)
+		dpm_superior_set_must_resume(dev);
+
+Complete:
+	complete_all(&dev->power.completion);
+	TRACE_SUSPEND(error);
+	return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_noirq(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+
+	put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule(async_suspend_noirq, dev);
+		return 0;
+	}
+	return __device_suspend_noirq(dev, pm_transition, false);
+}
+
+void dpm_noirq_begin(void)
+{
+	cpuidle_pause();
+	device_wakeup_arm_wake_irqs();
+	suspend_device_irqs();
+}
+
+int dpm_noirq_suspend_devices(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+	int error = 0;
+
+	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
+	while (!list_empty(&dpm_late_early_list)) {
+		struct device *dev = to_device(dpm_late_early_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_suspend_noirq(dev);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			pm_dev_err(dev, state, " noirq", error);
+			dpm_save_failed_dev(dev_name(dev));
+			put_device(dev);
+			break;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_noirq_list);
+		put_device(dev);
+
+		if (async_error)
+			break;
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
+
+	if (error) {
+		suspend_stats.failed_suspend_noirq++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+	}
+	dpm_show_time(starttime, state, error, "noirq");
+	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
+	return error;
+}
+
+/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+	int ret;
+
+	dpm_noirq_begin();
+	ret = dpm_noirq_suspend_devices(state);
+	if (ret)
+		dpm_resume_noirq(resume_event(state));
+
+	return ret;
+}
+
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+	struct device *parent = dev->parent;
+
+	if (!parent)
+		return;
+
+	spin_lock_irq(&parent->power.lock);
+
+	if (dev->power.wakeup_path && !parent->power.ignore_children)
+		parent->power.wakeup_path = true;
+
+	spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+						pm_message_t state,
+						const char **info_p)
+{
+	pm_callback_t callback;
+	const char *info;
+
+	if (dev->pm_domain) {
+		info = "late power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "late type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "late class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "late bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	} else {
+		return NULL;
+	}
+
+	if (info_p)
+		*info_p = info;
+
+	return callback;
+}
+
+/**
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback;
+	const char *info;
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_SUSPEND(0);
+
+	__pm_runtime_disable(dev, false);
+
+	dpm_wait_for_subordinate(dev, async);
+
+	if (async_error)
+		goto Complete;
+
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
+	if (dev->power.syscore || dev->power.direct_complete)
+		goto Complete;
+
+	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+	if (callback)
+		goto Run;
+
+	if (dev_pm_smart_suspend_and_suspended(dev) &&
+	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+		goto Skip;
+
+	if (dev->driver && dev->driver->pm) {
+		info = "late driver ";
+		callback = pm_late_early_op(dev->driver->pm, state);
+	}
+
+Run:
+	error = dpm_run_callback(callback, dev, state, info);
+	if (error) {
+		async_error = error;
+		goto Complete;
+	}
+	dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+	dev->power.is_late_suspended = true;
+
+Complete:
+	TRACE_SUSPEND(error);
+	complete_all(&dev->power.completion);
+	return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend_late(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+	put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule(async_suspend_late, dev);
+		return 0;
+	}
+
+	return __device_suspend_late(dev, pm_transition, false);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_late(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+	int error = 0;
+
+	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+
+	while (!list_empty(&dpm_suspended_list)) {
+		struct device *dev = to_device(dpm_suspended_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_suspend_late(dev);
+
+		mutex_lock(&dpm_list_mtx);
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_late_early_list);
+
+		if (error) {
+			pm_dev_err(dev, state, " late", error);
+			dpm_save_failed_dev(dev_name(dev));
+			put_device(dev);
+			break;
+		}
+		put_device(dev);
+
+		if (async_error)
+			break;
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
+	if (error) {
+		suspend_stats.failed_suspend_late++;
+		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+		dpm_resume_early(resume_event(state));
+	}
+	dpm_show_time(starttime, state, error, "late");
+	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
+	return error;
+}
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+	int error = dpm_suspend_late(state);
+	if (error)
+		return error;
+
+	error = dpm_suspend_noirq(state);
+	if (error) {
+		dpm_resume_early(resume_event(state));
+		return error;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
+
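+/*
+ * Suspend-side counterpart of dpm_resume_start()/dpm_resume_end() (sketch;
+ * dpm_suspend_start() is defined further below):
+ *
+ *	error = dpm_suspend_start(state);	->prepare() plus "suspend"
+ *	if (!error)
+ *		error = dpm_suspend_end(state);	"late" plus "noirq" phases
+ */
+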
+/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ * @info: string description of caller.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+			  int (*cb)(struct device *dev, pm_message_t state),
+			  const char *info)
+{
+	int error;
+	ktime_t calltime;
+
+	calltime = initcall_debug_start(dev, cb);
+
+	trace_device_pm_callback_start(dev, info, state.event);
+	error = cb(dev, state);
+	trace_device_pm_callback_end(dev, error);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, cb, error);
+
+	return error;
+}
+
+static void dpm_clear_superiors_direct_complete(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	if (dev->parent) {
+		spin_lock_irq(&dev->parent->power.lock);
+		dev->parent->power.direct_complete = false;
+		spin_unlock_irq(&dev->parent->power.lock);
+	}
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+		spin_lock_irq(&link->supplier->power.lock);
+		link->supplier->power.direct_complete = false;
+		spin_unlock_irq(&link->supplier->power.lock);
+	}
+
+	device_links_read_unlock(idx);
+}
+
+/**
+ * __device_suspend - Execute "suspend" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ */
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+{
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
+	int error = 0;
+	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+	TRACE_DEVICE(dev);
+	TRACE_SUSPEND(0);
+
+	dpm_wait_for_subordinate(dev, async);
+
+	if (async_error) {
+		dev->power.direct_complete = false;
+		goto Complete;
+	}
+
+	/*
+	 * If a device configured to wake up the system from sleep states
+	 * has been suspended at run time and there's a resume request pending
+	 * for it, this is equivalent to the device signaling wakeup, so the
+	 * system suspend operation should be aborted.
+	 */
+	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+		pm_wakeup_event(dev, 0);
+
+	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
+	if (dev->power.syscore)
+		goto Complete;
+
+	if (dev->power.direct_complete) {
+		if (pm_runtime_status_suspended(dev)) {
+			pm_runtime_disable(dev);
+			if (pm_runtime_status_suspended(dev))
+				goto Complete;
+
+			pm_runtime_enable(dev);
+		}
+		dev->power.direct_complete = false;
+	}
+
+	dev->power.may_skip_resume = false;
+	dev->power.must_resume = false;
+
+	dpm_watchdog_set(&wd, dev);
+	device_lock(dev);
+
+	if (dev->pm_domain) {
+		info = "power domain ";
+		callback = pm_op(&dev->pm_domain->ops, state);
+		goto Run;
+	}
+
+	if (dev->type && dev->type->pm) {
+		info = "type ";
+		callback = pm_op(dev->type->pm, state);
+		goto Run;
+	}
+
+	if (dev->class && dev->class->pm) {
+		info = "class ";
+		callback = pm_op(dev->class->pm, state);
+		goto Run;
+	}
+
+	if (dev->bus) {
+		if (dev->bus->pm) {
+			info = "bus ";
+			callback = pm_op(dev->bus->pm, state);
+		} else if (dev->bus->suspend) {
+			pm_dev_dbg(dev, state, "legacy bus ");
+			error = legacy_suspend(dev, state, dev->bus->suspend,
+						"legacy bus ");
+			goto End;
+		}
+	}
+
+ Run:
+	if (!callback && dev->driver && dev->driver->pm) {
+		info = "driver ";
+		callback = pm_op(dev->driver->pm, state);
+	}
+
+	error = dpm_run_callback(callback, dev, state, info);
+
+ End:
+	if (!error) {
+		dev->power.is_suspended = true;
+		if (device_may_wakeup(dev))
+			dev->power.wakeup_path = true;
+
+		dpm_propagate_wakeup_to_parent(dev);
+		dpm_clear_superiors_direct_complete(dev);
+	}
+
+	device_unlock(dev);
+	dpm_watchdog_clear(&wd);
+
+ Complete:
+	if (error)
+		async_error = error;
+
+	complete_all(&dev->power.completion);
+	TRACE_SUSPEND(error);
+	return error;
+}
+
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = __device_suspend(dev, pm_transition, true);
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, pm_transition, " async", error);
+	}
+
+	put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule(async_suspend, dev);
+		return 0;
+	}
+
+	return __device_suspend(dev, pm_transition, false);
+}
+
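+/*
+ * A device takes the async path above only if it has been marked for
+ * asynchronous suspend/resume beforehand, e.g. (illustrative) during probe:
+ *
+ *	device_enable_async_suspend(dev);
+ */
+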
+/**
+ * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend(pm_message_t state)
+{
+	ktime_t starttime = ktime_get();
+	int error = 0;
+
+	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
+	might_sleep();
+
+	cpufreq_suspend();
+
+	mutex_lock(&dpm_list_mtx);
+	pm_transition = state;
+	async_error = 0;
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		error = device_suspend(dev);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			pm_dev_err(dev, state, "", error);
+			dpm_save_failed_dev(dev_name(dev));
+			put_device(dev);
+			break;
+		}
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_suspended_list);
+		put_device(dev);
+		if (async_error)
+			break;
+	}
+	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
+	if (!error)
+		error = async_error;
+	if (error) {
+		suspend_stats.failed_suspend++;
+		dpm_save_failed_step(SUSPEND_SUSPEND);
+	}
+	dpm_show_time(starttime, state, error, NULL);
+	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
+	return error;
+}
+
+/**
+ * device_prepare - Prepare a device for system power transition.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for given device.  No new children of the
+ * device may be registered after this function has returned.
+ */
+static int device_prepare(struct device *dev, pm_message_t state)
+{
+	int (*callback)(struct device *) = NULL;
+	int ret = 0;
+
+	if (dev->power.syscore)
+		return 0;
+
+	WARN_ON(!pm_runtime_enabled(dev) &&
+		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+					      DPM_FLAG_LEAVE_SUSPENDED));
+
+	/*
+	 * If a device's parent goes into runtime suspend at the wrong time,
+	 * it won't be possible to resume the device.  To prevent this we
+	 * block runtime suspend here, during the prepare phase, and allow
+	 * it again during the complete phase.
+	 */
+	pm_runtime_get_noresume(dev);
+
+	device_lock(dev);
+
+	dev->power.wakeup_path = false;
+
+	if (dev->power.no_pm_callbacks)
+		goto unlock;
+
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.prepare;
+	else if (dev->type && dev->type->pm)
+		callback = dev->type->pm->prepare;
+	else if (dev->class && dev->class->pm)
+		callback = dev->class->pm->prepare;
+	else if (dev->bus && dev->bus->pm)
+		callback = dev->bus->pm->prepare;
+
+	if (!callback && dev->driver && dev->driver->pm)
+		callback = dev->driver->pm->prepare;
+
+	if (callback)
+		ret = callback(dev);
+
+unlock:
+	device_unlock(dev);
+
+	if (ret < 0) {
+		suspend_report_result(callback, ret);
+		pm_runtime_put(dev);
+		return ret;
+	}
+	/*
+	 * A positive return value from ->prepare() means "this device appears
+	 * to be runtime-suspended and its state is fine, so if it really is
+	 * runtime-suspended, you can leave it in that state provided that you
+	 * will do the same thing with all of its descendants".  This only
+	 * applies to suspend transitions, however.
+	 */
+	spin_lock_irq(&dev->power.lock);
+	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+		((pm_runtime_suspended(dev) && ret > 0) ||
+		 dev->power.no_pm_callbacks) &&
+		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
+}
+
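+/*
+ * Illustrative ->prepare() honoring the positive-return contract described
+ * above (hypothetical driver; the positive value is only a hint and the
+ * core re-checks the runtime PM status in __device_suspend()):
+ *
+ *	static int foo_prepare(struct device *dev)
+ *	{
+ *		return pm_runtime_suspended(dev);
+ *	}
+ */
+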
+/**
+ * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for all devices.
+ */
+int dpm_prepare(pm_message_t state)
+{
+	int error = 0;
+
+	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
+	might_sleep();
+
+	/*
+	 * Give known devices a chance to complete their probes before we
+	 * disable further probing.  This synchronization point matters at
+	 * least at boot time and during hibernation restore.
+	 */
+	wait_for_device_probe();
+	/*
+	 * Probing a device during suspend or hibernation is unsafe and would
+	 * make system behavior unpredictable, so prohibit device probing here
+	 * and defer the probes instead.  Normal behavior is restored in
+	 * dpm_complete().
+	 */
+	device_block_probing();
+
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
+		trace_device_pm_callback_start(dev, "", state.event);
+		error = device_prepare(dev, state);
+		trace_device_pm_callback_end(dev, error);
+
+		mutex_lock(&dpm_list_mtx);
+		if (error) {
+			if (error == -EAGAIN) {
+				put_device(dev);
+				error = 0;
+				continue;
+			}
+			printk(KERN_INFO "PM: Device %s not prepared "
+				"for power transition: code %d\n",
+				dev_name(dev), error);
+			put_device(dev);
+			break;
+		}
+		dev->power.is_prepared = true;
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &dpm_prepared_list);
+		put_device(dev);
+	}
+	mutex_unlock(&dpm_list_mtx);
+	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
+	return error;
+}
+
+/**
+ * dpm_suspend_start - Prepare devices for PM transition and suspend them.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prepare all non-sysdev devices for system PM transition and execute "suspend"
+ * callbacks for them.
+ */
+int dpm_suspend_start(pm_message_t state)
+{
+	int error;
+
+	error = dpm_prepare(state);
+	if (error) {
+		suspend_stats.failed_prepare++;
+		dpm_save_failed_step(SUSPEND_PREPARE);
+	} else
+		error = dpm_suspend(state);
+	return error;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_start);
+
+void __suspend_report_result(const char *function, void *fn, int ret)
+{
+	if (ret)
+		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+}
+EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
+ */
+int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+	dpm_wait(dev, subordinate->power.async_suspend);
+	return async_error;
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
+
+/**
+ * dpm_for_each_dev - device iterator.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over devices in dpm_list, and call @fn for each device,
+ * passing it @data.
+ */
+void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
+{
+	struct device *dev;
+
+	if (!fn)
+		return;
+
+	device_pm_lock();
+	list_for_each_entry(dev, &dpm_list, power.entry)
+		fn(dev, data);
+	device_pm_unlock();
+}
+EXPORT_SYMBOL_GPL(dpm_for_each_dev);
+
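+/*
+ * Example use (hypothetical callback that counts the devices on dpm_list):
+ *
+ *	static void foo_count_dev(struct device *dev, void *data)
+ *	{
+ *		(*(unsigned int *)data)++;
+ *	}
+ *
+ *	unsigned int n = 0;
+ *	dpm_for_each_dev(&n, foo_count_dev);
+ */
+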
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+	if (!ops)
+		return true;
+
+	return !ops->prepare &&
+	       !ops->suspend &&
+	       !ops->suspend_late &&
+	       !ops->suspend_noirq &&
+	       !ops->resume_noirq &&
+	       !ops->resume_early &&
+	       !ops->resume &&
+	       !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.no_pm_callbacks =
+		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+		 !dev->bus->suspend && !dev->bus->resume)) &&
+		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+		 !dev->driver->suspend && !dev->driver->resume));
+	spin_unlock_irq(&dev->power.lock);
+}
+
+bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+{
+	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+		pm_runtime_status_suspended(dev);
+}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
new file mode 100644
index 0000000..c511def
--- /dev/null
+++ b/drivers/base/power/power.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/pm_qos.h>
+
+static inline void device_pm_init_common(struct device *dev)
+{
+	if (!dev->power.early_init) {
+		spin_lock_init(&dev->power.lock);
+		dev->power.qos = NULL;
+		dev->power.early_init = true;
+	}
+}
+
+#ifdef CONFIG_PM
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	dev->power.disable_depth = 1;
+	device_pm_init_common(dev);
+}
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_reinit(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+
+#define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
+					 WAKE_IRQ_DEDICATED_MANAGED)
+
+struct wake_irq {
+	struct device *dev;
+	unsigned int status;
+	int irq;
+	const char *name;
+};
+
+extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+					 bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
+extern void device_wakeup_detach_irq(struct device *dev);
+extern void device_wakeup_arm_wake_irqs(void);
+extern void device_wakeup_disarm_wake_irqs(void);
+
+#else
+
+static inline void device_wakeup_attach_irq(struct device *dev,
+					    struct wake_irq *wakeirq) {}
+
+static inline void device_wakeup_detach_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * sysfs.c
+ */
+
+extern int dpm_sysfs_add(struct device *dev);
+extern void dpm_sysfs_remove(struct device *dev);
+extern void rpm_sysfs_remove(struct device *dev);
+extern int wakeup_sysfs_add(struct device *dev);
+extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+
+#else /* CONFIG_PM */
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+}
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_reinit(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+static inline int dpm_sysfs_add(struct device *dev) { return 0; }
+static inline void dpm_sysfs_remove(struct device *dev) {}
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+
+/* drivers/base/power/main.c */
+extern struct list_head dpm_list;	/* The active device list */
+
+static inline struct device *to_device(struct list_head *entry)
+{
+	return container_of(entry, struct device, power.entry);
+}
+
+extern void device_pm_sleep_init(struct device *dev);
+extern void device_pm_add(struct device *);
+extern void device_pm_remove(struct device *);
+extern void device_pm_move_before(struct device *, struct device *);
+extern void device_pm_move_after(struct device *, struct device *);
+extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+	return dev->power.in_dpm_list;
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_sleep_init(struct device *dev) {}
+
+static inline void device_pm_add(struct device *dev) {}
+
+static inline void device_pm_remove(struct device *dev)
+{
+	pm_runtime_remove(dev);
+}
+
+static inline void device_pm_move_before(struct device *deva,
+					 struct device *devb) {}
+static inline void device_pm_move_after(struct device *deva,
+					struct device *devb) {}
+static inline void device_pm_move_last(struct device *dev) {}
+
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+	return device_is_registered(dev);
+}
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+	device_pm_sleep_init(dev);
+	pm_runtime_init(dev);
+}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 0000000..3382542
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,886 @@
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value: register requests
+ * Watchers of QoS value: get notified when the target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored into the device
+ *   dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ *   is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later freed when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ *   allocated and freed.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
+
+/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+	struct dev_pm_qos *qos = dev->power.qos;
+	struct pm_qos_flags *pqf;
+	s32 val;
+
+	lockdep_assert_held(&dev->power.lock);
+
+	if (IS_ERR_OR_NULL(qos))
+		return PM_QOS_FLAGS_UNDEFINED;
+
+	pqf = &qos->flags;
+	if (list_empty(&pqf->list))
+		return PM_QOS_FLAGS_UNDEFINED;
+
+	val = pqf->effective_flags & mask;
+	if (val)
+		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+	return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+	unsigned long irqflags;
+	enum pm_qos_flags_status ret;
+
+	spin_lock_irqsave(&dev->power.lock, irqflags);
+	ret = __dev_pm_qos_flags(dev, mask);
+	spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
+
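+/*
+ * Example check (PM_QOS_FLAG_NO_POWER_OFF is defined in <linux/pm_qos.h>):
+ *
+ *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) == PM_QOS_FLAGS_ALL)
+ *		...;	(all active requests demand that power stay on)
+ */
+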
+/**
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+	lockdep_assert_held(&dev->power.lock);
+
+	return dev_pm_qos_raw_read_value(dev);
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+	unsigned long flags;
+	s32 ret;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	ret = __dev_pm_qos_read_value(dev);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device callbacks.
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+			    enum pm_qos_req_action action, s32 value)
+{
+	struct dev_pm_qos *qos = req->dev->power.qos;
+	int ret;
+
+	switch (req->type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+			value = 0;
+
+		ret = pm_qos_update_target(&qos->resume_latency,
+					   &req->data.pnode, action, value);
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		ret = pm_qos_update_target(&qos->latency_tolerance,
+					   &req->data.pnode, action, value);
+		if (ret) {
+			value = pm_qos_read_value(&qos->latency_tolerance);
+			req->dev->power.set_latency_tolerance(req->dev, value);
+		}
+		break;
+	case DEV_PM_QOS_FLAGS:
+		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+					  action, value);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called on the first call to add_request, to allocate the constraint data.
+ * Must be called with the dev_pm_qos_mtx mutex held.
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+	struct dev_pm_qos *qos;
+	struct pm_qos_constraints *c;
+	struct blocking_notifier_head *n;
+
+	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+	if (!qos)
+		return -ENOMEM;
+
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (!n) {
+		kfree(qos);
+		return -ENOMEM;
+	}
+	BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+	c = &qos->resume_latency;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+	c->notifiers = n;
+
+	c = &qos->latency_tolerance;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+
+	INIT_LIST_HEAD(&qos->flags.list);
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.qos = qos;
+	spin_unlock_irq(&dev->power.lock);
+
+	return 0;
+}
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+	struct dev_pm_qos *qos;
+	struct dev_pm_qos_request *req, *tmp;
+	struct pm_qos_constraints *c;
+	struct pm_qos_flags *f;
+
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	/*
+	 * If the device's PM QoS resume latency limit or PM QoS flags have been
+	 * exposed to user space, they have to be hidden at this point.
+	 */
+	pm_qos_sysfs_remove_resume_latency(dev);
+	pm_qos_sysfs_remove_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
+
+	qos = dev->power.qos;
+	if (!qos)
+		goto out;
+
+	/* Flush the constraints lists for the device. */
+	c = &qos->resume_latency;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		/*
+		 * Update constraints list and call the notification
+		 * callbacks if needed
+		 */
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+	c = &qos->latency_tolerance;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+	f = &qos->flags;
+	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.qos = ERR_PTR(-ENODEV);
+	spin_unlock_irq(&dev->power.lock);
+
+	kfree(qos->resume_latency.notifiers);
+	kfree(qos);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+					enum dev_pm_qos_req_type type)
+{
+	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+	       !dev->power.set_latency_tolerance;
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret = 0;
+
+	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
+		return -EINVAL;
+
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+	if (!ret) {
+		req->dev = dev;
+		req->type = type;
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+	}
+	return ret;
+}
+
+/**
+ * dev_pm_qos_add_request - inserts a new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @type: type of the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+			   enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_add_request(dev, req, type, value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
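+/*
+ * Typical request lifecycle (hypothetical caller; @req must remain
+ * allocated for as long as the request is active, and resume-latency
+ * values are expressed in microseconds):
+ *
+ *	static struct dev_pm_qos_request foo_req;
+ *
+ *	dev_pm_qos_add_request(dev, &foo_req, DEV_PM_QOS_RESUME_LATENCY, 100);
+ *	...
+ *	dev_pm_qos_update_request(&foo_req, 250);
+ *	...
+ *	dev_pm_qos_remove_request(&foo_req);
+ */
+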
+/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+				       s32 new_value)
+{
+	s32 curr_value;
+	int ret = 0;
+
+	if (!req) /* guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
+
+	switch (req->type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		curr_value = req->data.pnode.prio;
+		break;
+	case DEV_PM_QOS_FLAGS:
+		curr_value = req->data.flr.flags;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
+					new_value);
+	if (curr_value != new_value)
+		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
+	if (!req) /* guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
+
+	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
+					PM_QOS_DEFAULT_VALUE);
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
+	return ret;
+}
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Removes the PM QoS request from the list of constraints and recomputes
+ * the current target value.  Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_remove_request(req);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or error code will be returned if that fails).
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+	int ret = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	if (!ret)
+		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+						       notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
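+/*
+ * Sketch of a watcher (hypothetical): the notifier is called with the new
+ * aggregate resume-latency target as @value.
+ *
+ *	static int foo_qos_notify(struct notifier_block *nb,
+ *				  unsigned long value, void *unused)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = {
+ *		.notifier_call = foo_qos_notify,
+ *	};
+ *
+ *	dev_pm_qos_add_notifier(dev, &foo_nb);
+ */
+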
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+			       struct notifier_block *notifier)
+{
+	int retval = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	/* Silently return if the constraints object is not present. */
+	if (!IS_ERR_OR_NULL(dev->power.qos))
+		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+							    notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	struct device *ancestor = dev->parent;
+	int ret = -ENODEV;
+
+	switch (type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+		while (ancestor && !ancestor->power.ignore_children)
+			ancestor = ancestor->parent;
+
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		while (ancestor && !ancestor->power.set_latency_tolerance)
+			ancestor = ancestor->parent;
+
+		break;
+	default:
+		ancestor = NULL;
+	}
+	if (ancestor)
+		ret = dev_pm_qos_add_request(ancestor, req, type, value);
+
+	if (ret < 0)
+		req->dev = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+					   enum dev_pm_qos_req_type type)
+{
+	struct dev_pm_qos_request *req = NULL;
+
+	switch (type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
+		req = dev->power.qos->resume_latency_req;
+		dev->power.qos->resume_latency_req = NULL;
+		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		req = dev->power.qos->latency_tolerance_req;
+		dev->power.qos->latency_tolerance_req = NULL;
+		break;
+	case DEV_PM_QOS_FLAGS:
+		req = dev->power.qos->flags_req;
+		dev->power.qos->flags_req = NULL;
+		break;
+	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
+}
+
+static void dev_pm_qos_drop_user_request(struct device *dev,
+					 enum dev_pm_qos_req_type type)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_drop_user_request(dev, type);
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+	struct dev_pm_qos_request *req;
+	int ret;
+
+	if (!device_is_registered(dev) || value < 0)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->resume_latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
+		goto out;
+	}
+	dev->power.qos->resume_latency_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	ret = pm_qos_sysfs_add_resume_latency(dev);
+	if (ret)
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+
+ out:
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+}
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_resume_latency(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+	struct dev_pm_qos_request *req;
+	int ret;
+
+	if (!device_is_registered(dev))
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
+		goto out;
+	}
+	dev->power.qos->flags_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	ret = pm_qos_sysfs_add_flags(dev);
+	if (ret)
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+ out:
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+	pm_runtime_put(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+}
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+	pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+	s32 value;
+	int ret;
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	value = dev_pm_qos_requested_flags(dev);
+	if (set)
+		value |= mask;
+	else
+		value &= ~mask;
+
+	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+	s32 ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = IS_ERR_OR_NULL(dev->power.qos)
+		|| !dev->power.qos->latency_tolerance_req ?
+			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+				ret = 0;
+			else
+				ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+	int ret;
+
+	if (!dev->power.set_latency_tolerance)
+		return -EINVAL;
+
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+	ret = pm_qos_sysfs_add_latency_tolerance(dev);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+	pm_qos_sysfs_remove_latency_tolerance(dev);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+	/* Remove the request from user space now */
+	pm_runtime_get_sync(dev);
+	dev_pm_qos_update_user_latency_tolerance(dev,
+		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+	pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
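+
+/*
+ * Illustrative use (not part of this file): exposing the latency tolerance
+ * attribute only makes sense for devices whose power.set_latency_tolerance
+ * callback is set; foo_set_latency_tolerance() is a hypothetical handler:
+ *
+ *	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
+ *	ret = dev_pm_qos_expose_latency_tolerance(dev);
+ */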
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 0000000..beb85c3
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1715 @@
+/*
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
+ *
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/sched/mm.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/rpm.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
+static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
+{
+	pm_callback_t cb;
+	const struct dev_pm_ops *ops;
+
+	if (dev->pm_domain)
+		ops = &dev->pm_domain->ops;
+	else if (dev->type && dev->type->pm)
+		ops = dev->type->pm;
+	else if (dev->class && dev->class->pm)
+		ops = dev->class->pm;
+	else if (dev->bus && dev->bus->pm)
+		ops = dev->bus->pm;
+	else
+		ops = NULL;
+
+	if (ops)
+		cb = *(pm_callback_t *)((void *)ops + cb_offset);
+	else
+		cb = NULL;
+
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+
+	return cb;
+}
+
+#define RPM_GET_CALLBACK(dev, callback) \
+		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
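+
+/*
+ * For instance, RPM_GET_CALLBACK(dev, runtime_suspend) picks the
+ * ->runtime_suspend() implementation from the highest-priority dev_pm_ops
+ * available (pm_domain first, then device type, class and bus), and falls
+ * back to dev->driver->pm if the selected set does not implement it.
+ */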
+
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+void update_pm_runtime_accounting(struct device *dev)
+{
+	unsigned long now = jiffies;
+	unsigned long delta;
+
+	delta = now - dev->power.accounting_timestamp;
+
+	dev->power.accounting_timestamp = now;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	if (dev->power.runtime_status == RPM_SUSPENDED)
+		dev->power.suspended_jiffies += delta;
+	else
+		dev->power.active_jiffies += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+	update_pm_runtime_accounting(dev);
+	dev->power.runtime_status = status;
+}
+
+/**
+ * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_deactivate_timer(struct device *dev)
+{
+	if (dev->power.timer_expires > 0) {
+		del_timer(&dev->power.suspend_timer);
+		dev->power.timer_expires = 0;
+	}
+}
+
+/**
+ * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_cancel_pending(struct device *dev)
+{
+	pm_runtime_deactivate_timer(dev);
+	/*
+	 * In case there's a request pending, make sure its work function will
+	 * return without doing anything.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+}
+
+/**
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
+ *
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time.  If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
+ */
+unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+{
+	int autosuspend_delay;
+	long elapsed;
+	unsigned long last_busy;
+	unsigned long expires = 0;
+
+	if (!dev->power.use_autosuspend)
+		goto out;
+
+	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
+	if (autosuspend_delay < 0)
+		goto out;
+
+	last_busy = READ_ONCE(dev->power.last_busy);
+	elapsed = jiffies - last_busy;
+	if (elapsed < 0)
+		goto out;	/* jiffies has wrapped around. */
+
+	/*
+	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
+	 * up to the nearest second.
+	 */
+	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
+	if (autosuspend_delay >= 1000)
+		expires = round_jiffies(expires);
+	expires += !expires;
+	if (elapsed >= expires - last_busy)
+		expires = 0;	/* Already expired. */
+
+ out:
+	return expires;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
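+
+/*
+ * Illustrative use (not part of this file): drivers rarely call the function
+ * above directly; they refresh power.last_busy and let the autosuspend
+ * machinery evaluate the expiration, e.g. at the end of an I/O completion
+ * path:
+ *
+ *	pm_runtime_mark_last_busy(dev);
+ *	pm_runtime_put_autosuspend(dev);
+ */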
+
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+	return dev->power.memalloc_noio;
+}
+
+/**
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True to set the flag, false to clear it.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block or network device
+ * drivers, to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ *     If memory allocation with GFP_KERNEL is called inside the runtime
+ *     resume/suspend callback of any one of the device's ancestors (or the
+ *     block device itself), a deadlock may be triggered inside the
+ *     memory allocation, since it might not complete until the block
+ *     device becomes active and the involved page I/O finishes.  This
+ *     situation was first pointed out by Alan Stern.  Network devices
+ *     can be involved in similar situations, e.g. iSCSI.
+ *
+ * dev_hotplug_mutex is held in this function to handle the hotplug race,
+ * because pm_runtime_set_memalloc_noio() may be called from an async
+ * probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+	static DEFINE_MUTEX(dev_hotplug_mutex);
+
+	mutex_lock(&dev_hotplug_mutex);
+	for (;;) {
+		bool enabled;
+
+		/* Hold the power lock, since the bitfield is not SMP-safe. */
+		spin_lock_irq(&dev->power.lock);
+		enabled = dev->power.memalloc_noio;
+		dev->power.memalloc_noio = enable;
+		spin_unlock_irq(&dev->power.lock);
+
+		/*
+		 * No need to enable the ancestors any more if the device
+		 * has already been enabled.
+		 */
+		if (enabled && enable)
+			break;
+
+		dev = dev->parent;
+
+		/*
+		 * Clear the flag of the parent device only if none of its
+		 * children has the flag set, because an ancestor's flag may
+		 * have been set by any one of its descendants.
+		 */
+		if (!dev || (!enable &&
+			     device_for_each_child(dev, NULL,
+						   dev_memalloc_noio)))
+			break;
+	}
+	mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
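+
+/*
+ * Illustrative use (not part of this file): a block device driver would set
+ * the flag once the device is in the device tree and clear it again before
+ * removal:
+ *
+ *	error = device_add(dev);
+ *	...
+ *	pm_runtime_set_memalloc_noio(dev, true);
+ *	...
+ *	pm_runtime_set_memalloc_noio(dev, false);
+ *	device_del(dev);
+ */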
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev->power.runtime_error)
+		retval = -EINVAL;
+	else if (dev->power.disable_depth > 0)
+		retval = -EACCES;
+	else if (atomic_read(&dev->power.usage_count) > 0)
+		retval = -EAGAIN;
+	else if (!dev->power.ignore_children &&
+			atomic_read(&dev->power.child_count))
+		retval = -EBUSY;
+
+	/* Pending resume requests take precedence over suspends. */
+	else if ((dev->power.deferred_resume
+			&& dev->power.runtime_status == RPM_SUSPENDING)
+	    || (dev->power.request_pending
+			&& dev->power.request == RPM_REQ_RESUME))
+		retval = -EAGAIN;
+	else if (__dev_pm_qos_read_value(dev) == 0)
+		retval = -EPERM;
+	else if (dev->power.runtime_status == RPM_SUSPENDED)
+		retval = 1;
+
+	return retval;
+}
+
+static int rpm_get_suppliers(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+		int retval;
+
+		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+			continue;
+
+		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+		    link->rpm_active)
+			continue;
+
+		retval = pm_runtime_get_sync(link->supplier);
+		/* Ignore suppliers with disabled runtime PM. */
+		if (retval < 0 && retval != -EACCES) {
+			pm_runtime_put_noidle(link->supplier);
+			return retval;
+		}
+		link->rpm_active = true;
+	}
+	return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->rpm_active &&
+		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+			pm_runtime_put(link->supplier);
+			link->rpm_active = false;
+		}
+}
+
+/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int retval, idx;
+	bool use_links = dev->power.links_count > 0;
+
+	if (dev->power.irq_safe) {
+		spin_unlock(&dev->power.lock);
+	} else {
+		spin_unlock_irq(&dev->power.lock);
+
+		/*
+		 * Resume suppliers if necessary.
+		 *
+		 * The device's runtime PM status cannot change until this
+		 * routine returns, so it is safe to read the status outside of
+		 * the lock.
+		 */
+		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+			idx = device_links_read_lock();
+
+			retval = rpm_get_suppliers(dev);
+			if (retval)
+				goto fail;
+
+			device_links_read_unlock(idx);
+		}
+	}
+
+	retval = cb(dev);
+
+	if (dev->power.irq_safe) {
+		spin_lock(&dev->power.lock);
+	} else {
+		/*
+		 * If the device is suspending and the callback has returned
+		 * success, drop the usage counters of the suppliers that have
+		 * been reference counted on its resume.
+		 *
+		 * Do that if resume fails too.
+		 */
+		if (use_links
+		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+			idx = device_links_read_lock();
+
+ fail:
+			rpm_put_suppliers(dev);
+
+			device_links_read_unlock(idx);
+		}
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
+	return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended.  If
+ * another idle notification has been started earlier, return immediately.  If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+	int (*callback)(struct device *);
+	int retval;
+
+	trace_rpm_idle_rcuidle(dev, rpmflags);
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
+	else if (dev->power.runtime_status != RPM_ACTIVE)
+		retval = -EAGAIN;
+
+	/*
+	 * Any pending request other than an idle notification takes
+	 * precedence over us, except that the timer may be running.
+	 */
+	else if (dev->power.request_pending &&
+	    dev->power.request > RPM_REQ_IDLE)
+		retval = -EAGAIN;
+
+	/* Act as though RPM_NOWAIT is always set. */
+	else if (dev->power.idle_notification)
+		retval = -EINPROGRESS;
+	if (retval)
+		goto out;
+
+	/* Pending requests need to be canceled. */
+	dev->power.request = RPM_REQ_NONE;
+
+	if (dev->power.no_callbacks)
+		goto out;
+
+	/* Carry out an asynchronous or a synchronous idle notification. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_IDLE;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+		return 0;
+	}
+
+	dev->power.idle_notification = true;
+
+	callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+	if (callback)
+		retval = __rpm_callback(callback, dev);
+
+	dev->power.idle_notification = false;
+	wake_up_all(&dev->power.wait_queue);
+
+ out:
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
+}
+
+/**
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+{
+	int retval;
+
+	if (!cb)
+		return -ENOSYS;
+
+	if (dev->power.memalloc_noio) {
+		unsigned int noio_flag;
+
+		/*
+		 * Deadlock might be caused if memory allocation with
+		 * GFP_KERNEL happens inside runtime_suspend and
+		 * runtime_resume callbacks of one block device's
+		 * ancestor or the block device itself. Network
+		 * device might be thought as part of iSCSI block
+		 * device, so network device and its ancestor should
+		 * be marked as memalloc_noio too.
+		 */
+		noio_flag = memalloc_noio_save();
+		retval = __rpm_callback(cb, dev);
+		memalloc_noio_restore(noio_flag);
+	} else {
+		retval = __rpm_callback(cb, dev);
+	}
+
+	dev->power.runtime_error = retval;
+	return retval != -EACCES ? retval : -EIO;
+}
+
+/**
+ * rpm_suspend - Carry out runtime suspend of given device.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly.  If the callback
+ * succeeds and a deferred resume was requested while it was running, carry
+ * out the resume and return -EAGAIN; otherwise send an idle notification for
+ * the device's parent (unless parent->power.ignore_children or
+ * dev->power.irq_safe is set).  If the callback fails with -EAGAIN or -EBUSY,
+ * and the RPM_AUTO flag is set and the next autosuspend-delay expiration time
+ * is in the future, schedule another autosuspend attempt.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_suspend(struct device *dev, int rpmflags)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int (*callback)(struct device *);
+	struct device *parent = NULL;
+	int retval;
+
+	trace_rpm_suspend_rcuidle(dev, rpmflags);
+
+ repeat:
+	retval = rpm_check_suspend_allowed(dev);
+
+	if (retval < 0)
+		;	/* Conditions are wrong. */
+
+	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+	else if (dev->power.runtime_status == RPM_RESUMING &&
+	    !(rpmflags & RPM_ASYNC))
+		retval = -EAGAIN;
+	if (retval)
+		goto out;
+
+	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
+	if ((rpmflags & RPM_AUTO)
+	    && dev->power.runtime_status != RPM_SUSPENDING) {
+		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+
+		if (expires != 0) {
+			/* Pending requests need to be canceled. */
+			dev->power.request = RPM_REQ_NONE;
+
+			/*
+			 * Optimization: If the timer is already running and is
+			 * set to expire at or before the autosuspend delay,
+			 * avoid the overhead of resetting it.  Just let it
+			 * expire; pm_suspend_timer_fn() will take care of the
+			 * rest.
+			 */
+			if (!(dev->power.timer_expires && time_before_eq(
+			    dev->power.timer_expires, expires))) {
+				dev->power.timer_expires = expires;
+				mod_timer(&dev->power.suspend_timer, expires);
+			}
+			dev->power.timer_autosuspends = 1;
+			goto out;
+		}
+	}
+
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
+	if (dev->power.runtime_status == RPM_SUSPENDING) {
+		DEFINE_WAIT(wait);
+
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+			retval = -EINPROGRESS;
+			goto out;
+		}
+
+		if (dev->power.irq_safe) {
+			spin_unlock(&dev->power.lock);
+
+			cpu_relax();
+
+			spin_lock(&dev->power.lock);
+			goto repeat;
+		}
+
+		/* Wait for the other suspend running in parallel with us. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_SUSPENDING)
+				break;
+
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+		goto repeat;
+	}
+
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
+
+	/* Carry out an asynchronous or a synchronous suspend. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = (rpmflags & RPM_AUTO) ?
+		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		goto out;
+	}
+
+	__update_runtime_status(dev, RPM_SUSPENDING);
+
+	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+	dev_pm_enable_wake_irq_check(dev, true);
+	retval = rpm_callback(callback, dev);
+	if (retval)
+		goto fail;
+
+ no_callback:
+	__update_runtime_status(dev, RPM_SUSPENDED);
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->parent) {
+		parent = dev->parent;
+		atomic_add_unless(&parent->power.child_count, -1, 0);
+	}
+	wake_up_all(&dev->power.wait_queue);
+
+	if (dev->power.deferred_resume) {
+		dev->power.deferred_resume = false;
+		rpm_resume(dev, 0);
+		retval = -EAGAIN;
+		goto out;
+	}
+
+	/* Maybe the parent is now able to suspend. */
+	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+		spin_unlock(&dev->power.lock);
+
+		spin_lock(&parent->power.lock);
+		rpm_idle(parent, RPM_ASYNC);
+		spin_unlock(&parent->power.lock);
+
+		spin_lock(&dev->power.lock);
+	}
+
+ out:
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+	return retval;
+
+ fail:
+	dev_pm_disable_wake_irq_check(dev);
+	__update_runtime_status(dev, RPM_ACTIVE);
+	dev->power.deferred_resume = false;
+	wake_up_all(&dev->power.wait_queue);
+
+	if (retval == -EAGAIN || retval == -EBUSY) {
+		dev->power.runtime_error = 0;
+
+		/*
+		 * If the callback routine failed an autosuspend, and
+		 * if the last_busy time has been updated so that there
+		 * is a new autosuspend expiration time, automatically
+		 * reschedule another autosuspend.
+		 */
+		if ((rpmflags & RPM_AUTO) &&
+		    pm_runtime_autosuspend_expiration(dev) != 0)
+			goto repeat;
+	} else {
+		pm_runtime_cancel_pending(dev);
+	}
+	goto out;
+}
+
+/**
+ * rpm_resume - Carry out runtime resume of given device.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be resumed.  Cancel
+ * any scheduled or pending requests.  If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
+ * parallel with this function, either tell the other process to resume after
+ * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
+ * flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly.  Queue an idle notification for the
+ * device if the resume succeeded.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_resume(struct device *dev, int rpmflags)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int (*callback)(struct device *);
+	struct device *parent = NULL;
+	int retval = 0;
+
+	trace_rpm_resume_rcuidle(dev, rpmflags);
+
+ repeat:
+	if (dev->power.runtime_error)
+		retval = -EINVAL;
+	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+	    && dev->power.runtime_status == RPM_ACTIVE)
+		retval = 1;
+	else if (dev->power.disable_depth > 0)
+		retval = -EACCES;
+	if (retval)
+		goto out;
+
+	/*
+	 * Other scheduled or pending requests need to be canceled.  Small
+	 * optimization: If an autosuspend timer is running, leave it running
+	 * rather than cancelling it now only to restart it again in the near
+	 * future.
+	 */
+	dev->power.request = RPM_REQ_NONE;
+	if (!dev->power.timer_autosuspends)
+		pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.runtime_status == RPM_ACTIVE) {
+		retval = 1;
+		goto out;
+	}
+
+	if (dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.runtime_status == RPM_SUSPENDING) {
+		DEFINE_WAIT(wait);
+
+		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+			if (dev->power.runtime_status == RPM_SUSPENDING)
+				dev->power.deferred_resume = true;
+			else
+				retval = -EINPROGRESS;
+			goto out;
+		}
+
+		if (dev->power.irq_safe) {
+			spin_unlock(&dev->power.lock);
+
+			cpu_relax();
+
+			spin_lock(&dev->power.lock);
+			goto repeat;
+		}
+
+		/* Wait for the operation carried out in parallel with us. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_RESUMING
+			    && dev->power.runtime_status != RPM_SUSPENDING)
+				break;
+
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+		goto repeat;
+	}
+
+	/*
+	 * See if we can skip waking up the parent.  This is safe only if
+	 * power.no_callbacks is set, because otherwise we don't know whether
+	 * the resume will actually succeed.
+	 */
+	if (dev->power.no_callbacks && !parent && dev->parent) {
+		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+		if (dev->parent->power.disable_depth > 0
+		    || dev->parent->power.ignore_children
+		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
+			atomic_inc(&dev->parent->power.child_count);
+			spin_unlock(&dev->parent->power.lock);
+			retval = 1;
+			goto no_callback;	/* Assume success. */
+		}
+		spin_unlock(&dev->parent->power.lock);
+	}
+
+	/* Carry out an asynchronous or a synchronous resume. */
+	if (rpmflags & RPM_ASYNC) {
+		dev->power.request = RPM_REQ_RESUME;
+		if (!dev->power.request_pending) {
+			dev->power.request_pending = true;
+			queue_work(pm_wq, &dev->power.work);
+		}
+		retval = 0;
+		goto out;
+	}
+
+	if (!parent && dev->parent) {
+		/*
+		 * Increment the parent's usage counter and resume it if
+		 * necessary.  Not needed if dev is irq-safe; then the
+		 * parent is permanently resumed.
+		 */
+		parent = dev->parent;
+		if (dev->power.irq_safe)
+			goto skip_parent;
+		spin_unlock(&dev->power.lock);
+
+		pm_runtime_get_noresume(parent);
+
+		spin_lock(&parent->power.lock);
+		/*
+		 * Resume the parent if it has runtime PM enabled and has not
+		 * been set to ignore its children.
+		 */
+		if (!parent->power.disable_depth
+		    && !parent->power.ignore_children) {
+			rpm_resume(parent, 0);
+			if (parent->power.runtime_status != RPM_ACTIVE)
+				retval = -EBUSY;
+		}
+		spin_unlock(&parent->power.lock);
+
+		spin_lock(&dev->power.lock);
+		if (retval)
+			goto out;
+		goto repeat;
+	}
+ skip_parent:
+
+	if (dev->power.no_callbacks)
+		goto no_callback;	/* Assume success. */
+
+	__update_runtime_status(dev, RPM_RESUMING);
+
+	callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+	dev_pm_disable_wake_irq_check(dev);
+	retval = rpm_callback(callback, dev);
+	if (retval) {
+		__update_runtime_status(dev, RPM_SUSPENDED);
+		pm_runtime_cancel_pending(dev);
+		dev_pm_enable_wake_irq_check(dev, false);
+	} else {
+ no_callback:
+		__update_runtime_status(dev, RPM_ACTIVE);
+		pm_runtime_mark_last_busy(dev);
+		if (parent)
+			atomic_inc(&parent->power.child_count);
+	}
+	wake_up_all(&dev->power.wait_queue);
+
+	if (retval >= 0)
+		rpm_idle(dev, RPM_ASYNC);
+
+ out:
+	if (parent && !dev->power.irq_safe) {
+		spin_unlock_irq(&dev->power.lock);
+
+		pm_runtime_put(parent);
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+	return retval;
+}
+
+/**
+ * pm_runtime_work - Universal runtime PM work function.
+ * @work: Work structure used for scheduling the execution of this function.
+ *
+ * Use @work to get the device object the work is to be done for, determine what
+ * is to be done and execute the appropriate runtime PM function.
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+	struct device *dev = container_of(work, struct device, power.work);
+	enum rpm_request req;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (!dev->power.request_pending)
+		goto out;
+
+	req = dev->power.request;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.request_pending = false;
+
+	switch (req) {
+	case RPM_REQ_NONE:
+		break;
+	case RPM_REQ_IDLE:
+		rpm_idle(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_SUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT);
+		break;
+	case RPM_REQ_AUTOSUSPEND:
+		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
+		break;
+	case RPM_REQ_RESUME:
+		rpm_resume(dev, RPM_NOWAIT);
+		break;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @t: Timer that expired; the device is obtained from it via from_timer().
+ *
+ * Check if the time is right and queue a suspend request.
+ */
+static void pm_suspend_timer_fn(struct timer_list *t)
+{
+	struct device *dev = from_timer(dev, t, power.suspend_timer);
+	unsigned long flags;
+	unsigned long expires;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	expires = dev->power.timer_expires;
+	/* If 'expires' is after 'jiffies' we've been called too early. */
+	if (expires > 0 && !time_after(expires, jiffies)) {
+		dev->power.timer_expires = 0;
+		rpm_suspend(dev, dev->power.timer_autosuspends ?
+		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
+	}
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!delay) {
+		retval = rpm_suspend(dev, RPM_ASYNC);
+		goto out;
+	}
+
+	retval = rpm_check_suspend_allowed(dev);
+	if (retval)
+		goto out;
+
+	/* Other scheduled or pending requests need to be canceled. */
+	pm_runtime_cancel_pending(dev);
+
+	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+	dev->power.timer_expires += !dev->power.timer_expires;
+	dev->power.timer_autosuspends = 0;
+	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
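+
+/*
+ * Illustrative use (not part of this file): a driver that knows the device
+ * will stay idle can schedule the suspend itself, here roughly 100 ms from
+ * now; a negative return value other than -EAGAIN means the device cannot be
+ * suspended at the moment:
+ *
+ *	ret = pm_schedule_suspend(dev, 100);
+ *	if (ret < 0 && ret != -EAGAIN)
+ *		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
+ */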
+
+/**
+ * __pm_runtime_idle - Entry point for runtime idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero.  Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_suspend(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
+
+/**
+ * __pm_runtime_resume - Entry point for runtime resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
+
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
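+
+/*
+ * The usual driver entry points are static inline wrappers around the three
+ * functions above (see include/linux/pm_runtime.h); for example,
+ * pm_runtime_get_sync(dev) is __pm_runtime_resume(dev, RPM_GET_PUT),
+ * pm_runtime_put(dev) is __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) and
+ * pm_runtime_put_autosuspend(dev) is
+ * __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO).
+ */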
+
+/**
+ * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * @dev: Device to handle.
+ *
+ * Return -EINVAL if runtime PM is disabled for the device.
+ *
+ * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
+ * and the runtime PM usage counter is nonzero, increment the counter and
+ * return 1.  Otherwise return 0 without changing the counter.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = dev->power.disable_depth > 0 ? -EINVAL :
+		dev->power.runtime_status == RPM_ACTIVE
+			&& atomic_inc_not_zero(&dev->power.usage_count);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
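+
+/*
+ * Illustrative use (not part of this file): for paths that should touch the
+ * hardware only if it is already powered up, without resuming it;
+ * foo_poll_hardware() is a hypothetical helper:
+ *
+ *	if (pm_runtime_get_if_in_use(dev) <= 0)
+ *		return;
+ *	foo_poll_hardware(dev);
+ *	pm_runtime_put(dev);
+ */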
+
+/**
+ * __pm_runtime_set_status - Set runtime PM status of a device.
+ * @dev: Device to handle.
+ * @status: New runtime PM status of the device.
+ *
+ * If runtime PM of the device is disabled or its power.runtime_error field is
+ * different from zero, the status may be changed either to RPM_ACTIVE, or to
+ * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+ * However, if the device has a parent and the parent is not active, and the
+ * parent's power.ignore_children flag is unset, the device's status cannot be
+ * set to RPM_ACTIVE, so -EBUSY is returned in that case.
+ *
+ * If successful, __pm_runtime_set_status() clears the power.runtime_error field
+ * and the device parent's counter of unsuspended children is modified to
+ * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
+ * notification request for the parent is submitted.
+ */
+int __pm_runtime_set_status(struct device *dev, unsigned int status)
+{
+	struct device *parent = dev->parent;
+	unsigned long flags;
+	bool notify_parent = false;
+	int error = 0;
+
+	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (!dev->power.runtime_error && !dev->power.disable_depth) {
+		error = -EAGAIN;
+		goto out;
+	}
+
+	if (dev->power.runtime_status == status || !parent)
+		goto out_set;
+
+	if (status == RPM_SUSPENDED) {
+		atomic_add_unless(&parent->power.child_count, -1, 0);
+		notify_parent = !parent->power.ignore_children;
+	} else {
+		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
+
+		/*
+		 * It is invalid to put an active child under a parent that is
+		 * not active, has runtime PM enabled and the
+		 * 'power.ignore_children' flag unset.
+		 */
+		if (!parent->power.disable_depth
+		    && !parent->power.ignore_children
+		    && parent->power.runtime_status != RPM_ACTIVE) {
+			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+				dev_name(dev),
+				dev_name(parent));
+			error = -EBUSY;
+		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
+			atomic_inc(&parent->power.child_count);
+		}
+
+		spin_unlock(&parent->power.lock);
+
+		if (error)
+			goto out;
+	}
+
+ out_set:
+	__update_runtime_status(dev, status);
+	dev->power.runtime_error = 0;
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (notify_parent)
+		pm_request_idle(parent);
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * runtime PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+	pm_runtime_deactivate_timer(dev);
+
+	if (dev->power.request_pending) {
+		dev->power.request = RPM_REQ_NONE;
+		spin_unlock_irq(&dev->power.lock);
+
+		cancel_work_sync(&dev->power.work);
+
+		spin_lock_irq(&dev->power.lock);
+		dev->power.request_pending = false;
+	}
+
+	if (dev->power.runtime_status == RPM_SUSPENDING
+	    || dev->power.runtime_status == RPM_RESUMING
+	    || dev->power.idle_notification) {
+		DEFINE_WAIT(wait);
+
+		/* Suspend, wake-up or idle notification in progress. */
+		for (;;) {
+			prepare_to_wait(&dev->power.wait_queue, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (dev->power.runtime_status != RPM_SUSPENDING
+			    && dev->power.runtime_status != RPM_RESUMING
+			    && !dev->power.idle_notification)
+				break;
+			spin_unlock_irq(&dev->power.lock);
+
+			schedule();
+
+			spin_lock_irq(&dev->power.lock);
+		}
+		finish_wait(&dev->power.wait_queue, &wait);
+	}
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter
+ * and, if there's a pending resume request for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all runtime PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1 if there was a resume request pending and the device had to be woken up,
+ * 0 otherwise.
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+	int retval = 0;
+
+	pm_runtime_get_noresume(dev);
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		rpm_resume(dev, 0);
+		retval = 1;
+	}
+
+	__pm_runtime_barrier(dev);
+
+	spin_unlock_irq(&dev->power.lock);
+	pm_runtime_put_noidle(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+
+/**
+ * __pm_runtime_disable - Disable runtime PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending runtime PM requests for the device and wait for all
+ * operations in progress to complete.  The device can be either active or
+ * suspended after its runtime PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its runtime PM.
+ */
+void __pm_runtime_disable(struct device *dev, bool check_resume)
+{
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.disable_depth > 0) {
+		dev->power.disable_depth++;
+		goto out;
+	}
+
+	/*
+	 * Wake up the device if there's a resume request pending, because that
+	 * means there probably is some I/O to process and disabling runtime PM
+	 * shouldn't prevent the device from processing the I/O.
+	 */
+	if (check_resume && dev->power.request_pending
+	    && dev->power.request == RPM_REQ_RESUME) {
+		/*
+		 * Prevent suspends and idle notifications from being carried
+		 * out after we have woken up the device.
+		 */
+		pm_runtime_get_noresume(dev);
+
+		rpm_resume(dev, 0);
+
+		pm_runtime_put_noidle(dev);
+	}
+
+	if (!dev->power.disable_depth++)
+		__pm_runtime_barrier(dev);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+
+/**
+ * pm_runtime_enable - Enable runtime PM of a device.
+ * @dev: Device to handle.
+ */
+void pm_runtime_enable(struct device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	if (dev->power.disable_depth > 0)
+		dev->power.disable_depth--;
+	else
+		dev_warn(dev, "Unbalanced %s!\n", __func__);
+
+	WARN(!dev->power.disable_depth &&
+	     dev->power.runtime_status == RPM_SUSPENDED &&
+	     !dev->power.ignore_children &&
+	     atomic_read(&dev->power.child_count) > 0,
+	     "Enabling runtime PM for inactive device (%s) with active children\n",
+	     dev_name(dev));
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_enable);
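+
+/*
+ * Illustrative use (not part of this file): runtime PM starts out disabled
+ * (pm_runtime_init() sets power.disable_depth to 1), so a typical probe path
+ * powers the device up, marks it active and only then enables runtime PM;
+ * foo_power_on() is a hypothetical helper:
+ *
+ *	foo_power_on(dev);
+ *	pm_runtime_set_active(dev);
+ *	pm_runtime_enable(dev);
+ */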
+
+/**
+ * pm_runtime_forbid - Block runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (!dev->power.runtime_auto)
+		goto out;
+
+	dev->power.runtime_auto = false;
+	atomic_inc(&dev->power.usage_count);
+	rpm_resume(dev, 0);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.runtime_auto)
+		goto out;
+
+	dev->power.runtime_auto = true;
+	if (atomic_dec_and_test(&dev->power.usage_count))
+		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own.  The runtime sysfs attributes will be removed.
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.no_callbacks = 1;
+	spin_unlock_irq(&dev->power.lock);
+	if (device_is_registered(dev))
+		rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled.  It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+	spin_lock_irq(&dev->power.lock);
+	dev->power.irq_safe = 1;
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it.  Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+	int delay = dev->power.autosuspend_delay;
+
+	/* Should runtime suspend be prevented now? */
+	if (dev->power.use_autosuspend && delay < 0) {
+
+		/* If it used to be allowed then prevent it. */
+		if (!old_use || old_delay >= 0) {
+			atomic_inc(&dev->power.usage_count);
+			rpm_resume(dev, 0);
+		}
+	}
+
+	/* Runtime suspend should be allowed now. */
+	else {
+
+		/* If it used to be prevented then allow it. */
+		if (old_use && old_delay < 0)
+			atomic_dec(&dev->power.usage_count);
+
+		/* Maybe we can autosuspend now. */
+		rpm_idle(dev, RPM_AUTO);
+	}
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value.  If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
+ * changes the other way, allow runtime suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.autosuspend_delay = delay;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+	int old_delay, old_use;
+
+	spin_lock_irq(&dev->power.lock);
+	old_delay = dev->power.autosuspend_delay;
+	old_use = dev->power.use_autosuspend;
+	dev->power.use_autosuspend = use;
+	update_autosuspend(dev, old_delay, old_use);
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
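+
+/*
+ * Illustrative use (not part of this file): the two setters above are
+ * usually combined in probe via the pm_runtime_use_autosuspend() wrapper,
+ * after which busy paths use the mark_last_busy/put_autosuspend pair:
+ *
+ *	pm_runtime_set_autosuspend_delay(dev, 2000);
+ *	pm_runtime_use_autosuspend(dev);
+ *	...
+ *	pm_runtime_mark_last_busy(dev);
+ *	pm_runtime_put_autosuspend(dev);
+ */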
+
+/**
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
+ * @dev: Device object to initialize.
+ */
+void pm_runtime_init(struct device *dev)
+{
+	dev->power.runtime_status = RPM_SUSPENDED;
+	dev->power.idle_notification = false;
+
+	dev->power.disable_depth = 1;
+	atomic_set(&dev->power.usage_count, 0);
+
+	dev->power.runtime_error = 0;
+
+	atomic_set(&dev->power.child_count, 0);
+	pm_suspend_ignore_children(dev, false);
+	dev->power.runtime_auto = true;
+
+	dev->power.request_pending = false;
+	dev->power.request = RPM_REQ_NONE;
+	dev->power.deferred_resume = false;
+	dev->power.accounting_timestamp = jiffies;
+	INIT_WORK(&dev->power.work, pm_runtime_work);
+
+	dev->power.timer_expires = 0;
+	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+
+	init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
+ * @dev: Device object to re-initialize.
+ */
+void pm_runtime_reinit(struct device *dev)
+{
+	if (!pm_runtime_enabled(dev)) {
+		if (dev->power.runtime_status == RPM_ACTIVE)
+			pm_runtime_set_suspended(dev);
+		if (dev->power.irq_safe) {
+			spin_lock_irq(&dev->power.lock);
+			dev->power.irq_safe = 0;
+			spin_unlock_irq(&dev->power.lock);
+			if (dev->parent)
+				pm_runtime_put(dev->parent);
+		}
+	}
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+	__pm_runtime_disable(dev, false);
+	pm_runtime_reinit(dev);
+}
+
+/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->rpm_active) {
+			pm_runtime_put_noidle(dev);
+			link->rpm_active = false;
+		}
+	}
+
+	device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->flags & DL_FLAG_PM_RUNTIME)
+			pm_runtime_get_sync(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->flags & DL_FLAG_PM_RUNTIME)
+			pm_runtime_put(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.links_count++;
+	spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+	rpm_put_suppliers(dev);
+
+	spin_lock_irq(&dev->power.lock);
+	WARN_ON(dev->power.links_count == 0);
+	dev->power.links_count--;
+	spin_unlock_irq(&dev->power.lock);
+}
+
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+	return atomic_read(&dev->power.usage_count) <= 1 &&
+		(atomic_read(&dev->power.child_count) == 0 ||
+		 dev->power.ignore_children);
+}
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent).  Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to
+ * make sure the device is put into a low power state.  It should only be used
+ * during system-wide PM transitions to sleep states.  It assumes that the
+ * analogous
+ * pm_runtime_force_resume() will be used to resume the device.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret;
+
+	pm_runtime_disable(dev);
+	if (pm_runtime_status_suspended(dev))
+		return 0;
+
+	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+	ret = callback ? callback(dev) : 0;
+	if (ret)
+		goto err;
+
+	/*
+	 * If the device can stay in suspend after the system-wide transition
+	 * to the working state that will follow, drop the children counter of
+	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
+	 * function will be called again for it in the meantime.
+	 */
+	if (pm_runtime_need_not_resume(dev))
+		pm_runtime_set_suspended(dev);
+	else
+		__update_runtime_status(dev, RPM_SUSPENDED);
+
+	return 0;
+
+err:
+	pm_runtime_enable(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state if needed.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function, the caller is expected to have brought the
+ * device into a low power state with pm_runtime_force_suspend().  Here we
+ * reverse those actions and bring the device back to full power if it is
+ * expected to be used on system resume.  Otherwise, the resume is deferred to
+ * be managed via runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret = 0;
+
+	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+		goto out;
+
+	/*
+	 * The value of the parent's children counter is correct already, so
+	 * just update the status of the device.
+	 */
+	__update_runtime_status(dev, RPM_ACTIVE);
+
+	callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+	ret = callback ? callback(dev) : 0;
+	if (ret) {
+		pm_runtime_set_suspended(dev);
+		goto out;
+	}
+
+	pm_runtime_mark_last_busy(dev);
+out:
+	pm_runtime_enable(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
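+
+/*
+ * Illustrative use (not part of this file): drivers whose power management
+ * is fully runtime PM centric can reuse the two helpers above as their
+ * system sleep callbacks; foo_runtime_suspend/foo_runtime_resume are
+ * hypothetical:
+ *
+ *	static const struct dev_pm_ops foo_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ *					pm_runtime_force_resume)
+ *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
+ *				   foo_runtime_resume, NULL)
+ *	};
+ */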
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
new file mode 100644
index 0000000..d713738
--- /dev/null
+++ b/drivers/base/power/sysfs.c
@@ -0,0 +1,735 @@
+/*
+ * drivers/base/power/sysfs.c - sysfs entries for device PM
+ */
+
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include "power.h"
+
+/*
+ *	control - Report/change current runtime PM setting of the device
+ *
+ *	Runtime power management of a device can be blocked with the help of
+ *	this attribute.  All devices have one of the following two values for
+ *	the power/control file:
+ *
+ *	 + "auto\n" to allow the device to be power managed at run time;
+ *	 + "on\n" to prevent the device from being power managed at run time;
+ *
+ *	The default for all devices is "auto", which means that devices may be
+ *	subject to automatic power management, depending on their drivers.
+ *	Changing this attribute to "on" prevents the driver from power managing
+ *	the device at run time.  Doing that while the device is suspended causes
+ *	it to be woken up.
+ *
+ *	wakeup - Report/change current wakeup option for device
+ *
+ *	Some devices support "wakeup" events, which are hardware signals
+ *	used to activate devices from suspended or low power states.  Such
+ *	devices have one of three values for the sysfs power/wakeup file:
+ *
+ *	 + "enabled\n" to issue the events;
+ *	 + "disabled\n" not to do so; or
+ *	 + "\n" for temporary or permanent inability to issue wakeup.
+ *
+ *	(For example, unconfigured USB devices can't issue wakeups.)
+ *
+ *	Familiar examples of devices that can issue wakeup events include
+ *	keyboards and mice (both PS2 and USB styles), power buttons, modems,
+ *	"Wake-On-LAN" Ethernet links, GPIO lines, and more.  Some events
+ *	will wake the entire system from a suspend state; others may just
+ *	wake up the device (if the system as a whole is already active).
+ *	Some wakeup events use normal IRQ lines; others use special
+ *	out-of-band signaling.
+ *
+ *	It is the responsibility of device drivers to enable (or disable)
+ *	wakeup signaling as part of changing device power states, respecting
+ *	the policy choices provided through the driver model.
+ *
+ *	Devices may not be able to generate wakeup events from all power
+ *	states.  Also, the events may be ignored in some configurations;
+ *	for example, they might need help from other devices that aren't
+ *	active, or which may have wakeup disabled.  Some drivers rely on
+ *	wakeup events internally (unless they are disabled), keeping
+ *	their hardware in low power modes whenever they're unused.  This
+ *	saves runtime power, without requiring system-wide sleep states.
+ *
+ *	async - Report/change current async suspend setting for the device
+ *
+ *	Asynchronous suspend and resume of the device during system-wide power
+ *	state transitions can be enabled by writing "enabled" to this file.
+ *	Analogously, if "disabled" is written to this file, the device will be
+ *	suspended and resumed synchronously.
+ *
+ *	All devices have one of the following two values for power/async:
+ *
+ *	 + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ *	 + "disabled\n" to forbid it;
+ *
+ *	NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ *	of a device unless it is certain that all of the PM dependencies of the
+ *	device are known to the PM core.  However, for some devices this
+ *	attribute is set to "enabled" by bus type code or device drivers and in
+ *	that cases it should be safe to leave the default value.
+ *
+ *	autosuspend_delay_ms - Report/change a device's autosuspend_delay value
+ *
+ *	Some drivers don't want to carry out a runtime suspend as soon as a
+ *	device becomes idle; they want it always to remain idle for some period
+ *	of time before suspending it.  This period is the autosuspend_delay
+ *	value (expressed in milliseconds) and it can be controlled by the user.
+ *	If the value is negative then the device will never be runtime
+ *	suspended.
+ *
+ *	NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
+ *	value are used only if the driver calls pm_runtime_use_autosuspend().
+ *
+ *	wakeup_count - Report the number of wakeup events related to the device
+ */
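+
+/*
+ * As a rough illustration of the attributes documented above, a
+ * user-space helper might look like this (editor's sketch: the device
+ * path is a placeholder and error handling is omitted):
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *
+ *	static void foo_allow_runtime_pm(void)
+ *	{
+ *		int fd;
+ *
+ *		fd = open("/sys/devices/.../power/control", O_WRONLY);
+ *		write(fd, "auto", 4);		// "on" forbids runtime PM
+ *		close(fd);
+ *
+ *		fd = open("/sys/devices/.../power/autosuspend_delay_ms",
+ *			  O_WRONLY);
+ *		write(fd, "2000", 4);		// suspend after 2 s of idleness
+ *		close(fd);
+ *	}
+ */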
+
+const char power_group_name[] = "power";
+EXPORT_SYMBOL_GPL(power_group_name);
+
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%s\n",
+				dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+	device_lock(dev);
+	if (sysfs_streq(buf, ctrl_auto))
+		pm_runtime_allow(dev);
+	else if (sysfs_streq(buf, ctrl_on))
+		pm_runtime_forbid(dev);
+	else
+		n = -EINVAL;
+	device_unlock(dev);
+	return n;
+}
+
+static DEVICE_ATTR_RW(control);
+
+static ssize_t runtime_active_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR_RO(runtime_active_time);
+
+static ssize_t runtime_suspended_time_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	spin_lock_irq(&dev->power.lock);
+	update_pm_runtime_accounting(dev);
+	ret = sprintf(buf, "%i\n",
+		jiffies_to_msecs(dev->power.suspended_jiffies));
+	spin_unlock_irq(&dev->power.lock);
+	return ret;
+}
+
+static DEVICE_ATTR_RO(runtime_suspended_time);
+
+static ssize_t runtime_status_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char *p;
+
+	if (dev->power.runtime_error) {
+		p = "error\n";
+	} else if (dev->power.disable_depth) {
+		p = "unsupported\n";
+	} else {
+		switch (dev->power.runtime_status) {
+		case RPM_SUSPENDED:
+			p = "suspended\n";
+			break;
+		case RPM_SUSPENDING:
+			p = "suspending\n";
+			break;
+		case RPM_RESUMING:
+			p = "resuming\n";
+			break;
+		case RPM_ACTIVE:
+			p = "active\n";
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	return sprintf(buf, "%s", p);
+}
+
+static DEVICE_ATTR_RO(runtime_status);
+
+static ssize_t autosuspend_delay_ms_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!dev->power.use_autosuspend)
+		return -EIO;
+	return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
+}
+
+static ssize_t autosuspend_delay_ms_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	long delay;
+
+	if (!dev->power.use_autosuspend)
+		return -EIO;
+
+	if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
+		return -EINVAL;
+
+	device_lock(dev);
+	pm_runtime_set_autosuspend_delay(dev, delay);
+	device_unlock(dev);
+	return n;
+}
+
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
+
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+	if (value == 0)
+		return sprintf(buf, "n/a\n");
+	if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		value = 0;
+
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t n)
+{
+	s32 value;
+	int ret;
+
+	if (!kstrtos32(buf, 0, &value)) {
+		/*
+		 * Prevent users from writing negative or "no constraint" values
+		 * directly.
+		 */
+		if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+			return -EINVAL;
+
+		if (value == 0)
+			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+	} else if (sysfs_streq(buf, "n/a")) {
+		value = 0;
+	} else {
+		return -EINVAL;
+	}
+
+	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+					value);
+	return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
+
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+	if (value < 0)
+		return sprintf(buf, "auto\n");
+	if (value == PM_QOS_LATENCY_ANY)
+		return sprintf(buf, "any\n");
+
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t n)
+{
+	s32 value;
+	int ret;
+
+	if (kstrtos32(buf, 0, &value) == 0) {
+		/* Users can't write negative values directly */
+		if (value < 0)
+			return -EINVAL;
+	} else {
+		if (sysfs_streq(buf, "auto"))
+			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+		else if (sysfs_streq(buf, "any"))
+			value = PM_QOS_LATENCY_ANY;
+		else
+			return -EINVAL;
+	}
+	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+	return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+					& PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t n)
+{
+	int ret;
+
+	if (kstrtoint(buf, 0, &ret))
+		return -EINVAL;
+
+	if (ret != 0 && ret != 1)
+		return -EINVAL;
+
+	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+	return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+static const char _enabled[] = "enabled";
+static const char _disabled[] = "disabled";
+
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	return sprintf(buf, "%s\n", device_can_wakeup(dev)
+		? (device_may_wakeup(dev) ? _enabled : _disabled)
+		: "");
+}
+
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t n)
+{
+	if (!device_can_wakeup(dev))
+		return -EINVAL;
+
+	if (sysfs_streq(buf, _enabled))
+		device_set_wakeup_enable(dev, 1);
+	else if (sysfs_streq(buf, _disabled))
+		device_set_wakeup_enable(dev, 0);
+	else
+		return -EINVAL;
+	return n;
+}
+
+static DEVICE_ATTR_RW(wakeup);
+
+static ssize_t wakeup_count_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	unsigned long count = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		count = dev->power.wakeup->event_count;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_count);
+
+static ssize_t wakeup_active_count_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	unsigned long count = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		count = dev->power.wakeup->active_count;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_active_count);
+
+static ssize_t wakeup_abort_count_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	unsigned long count = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		count = dev->power.wakeup->wakeup_count;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_abort_count);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	unsigned long count = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		count = dev->power.wakeup->expire_count;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_expire_count);
+
+static ssize_t wakeup_active_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	unsigned int active = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		active = dev->power.wakeup->active;
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_active);
+
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	s64 msec = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		msec = ktime_to_ms(dev->power.wakeup->total_time);
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
+
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	s64 msec = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		msec = ktime_to_ms(dev->power.wakeup->max_time);
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
+
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	s64 msec = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		msec = ktime_to_ms(dev->power.wakeup->last_time);
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+						 struct device_attribute *attr,
+						 char *buf)
+{
+	s64 msec = 0;
+	bool enabled = false;
+
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+		enabled = true;
+	}
+	spin_unlock_irq(&dev->power.lock);
+	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
+#endif /* CONFIG_PM_AUTOSLEEP */
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+static ssize_t runtime_usage_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+static DEVICE_ATTR_RO(runtime_usage);
+
+static ssize_t runtime_active_kids_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", dev->power.ignore_children ?
+		0 : atomic_read(&dev->power.child_count));
+}
+static DEVICE_ATTR_RO(runtime_active_kids);
+
+static ssize_t runtime_enabled_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	if (dev->power.disable_depth && (dev->power.runtime_auto == false))
+		return sprintf(buf, "disabled & forbidden\n");
+	if (dev->power.disable_depth)
+		return sprintf(buf, "disabled\n");
+	if (dev->power.runtime_auto == false)
+		return sprintf(buf, "forbidden\n");
+	return sprintf(buf, "enabled\n");
+}
+static DEVICE_ATTR_RO(runtime_enabled);
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%s\n",
+			device_async_suspend_enabled(dev) ?
+				_enabled : _disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t n)
+{
+	if (sysfs_streq(buf, _enabled))
+		device_enable_async_suspend(dev);
+	else if (sysfs_streq(buf, _disabled))
+		device_disable_async_suspend(dev);
+	else
+		return -EINVAL;
+	return n;
+}
+
+static DEVICE_ATTR_RW(async);
+
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+
+static struct attribute *power_attrs[] = {
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_async.attr,
+#endif
+	&dev_attr_runtime_status.attr,
+	&dev_attr_runtime_usage.attr,
+	&dev_attr_runtime_active_kids.attr,
+	&dev_attr_runtime_enabled.attr,
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+	NULL,
+};
+static const struct attribute_group pm_attr_group = {
+	.name	= power_group_name,
+	.attrs	= power_attrs,
+};
+
+static struct attribute *wakeup_attrs[] = {
+#ifdef CONFIG_PM_SLEEP
+	&dev_attr_wakeup.attr,
+	&dev_attr_wakeup_count.attr,
+	&dev_attr_wakeup_active_count.attr,
+	&dev_attr_wakeup_abort_count.attr,
+	&dev_attr_wakeup_expire_count.attr,
+	&dev_attr_wakeup_active.attr,
+	&dev_attr_wakeup_total_time_ms.attr,
+	&dev_attr_wakeup_max_time_ms.attr,
+	&dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+	&dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
+#endif
+	NULL,
+};
+static const struct attribute_group pm_wakeup_attr_group = {
+	.name	= power_group_name,
+	.attrs	= wakeup_attrs,
+};
+
+static struct attribute *runtime_attrs[] = {
+#ifndef CONFIG_PM_ADVANCED_DEBUG
+	&dev_attr_runtime_status.attr,
+#endif
+	&dev_attr_control.attr,
+	&dev_attr_runtime_suspended_time.attr,
+	&dev_attr_runtime_active_time.attr,
+	&dev_attr_autosuspend_delay_ms.attr,
+	NULL,
+};
+static const struct attribute_group pm_runtime_attr_group = {
+	.name	= power_group_name,
+	.attrs	= runtime_attrs,
+};
+
+static struct attribute *pm_qos_resume_latency_attrs[] = {
+	&dev_attr_pm_qos_resume_latency_us.attr,
+	NULL,
+};
+static const struct attribute_group pm_qos_resume_latency_attr_group = {
+	.name	= power_group_name,
+	.attrs	= pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+	&dev_attr_pm_qos_latency_tolerance_us.attr,
+	NULL,
+};
+static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
+	.name	= power_group_name,
+	.attrs	= pm_qos_latency_tolerance_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+	&dev_attr_pm_qos_no_power_off.attr,
+	NULL,
+};
+static const struct attribute_group pm_qos_flags_attr_group = {
+	.name	= power_group_name,
+	.attrs	= pm_qos_flags_attrs,
+};
+
+int dpm_sysfs_add(struct device *dev)
+{
+	int rc;
+
+	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
+	if (rc)
+		return rc;
+
+	if (pm_runtime_callbacks_present(dev)) {
+		rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
+		if (rc)
+			goto err_out;
+	}
+	if (device_can_wakeup(dev)) {
+		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+		if (rc)
+			goto err_runtime;
+	}
+	if (dev->power.set_latency_tolerance) {
+		rc = sysfs_merge_group(&dev->kobj,
+				       &pm_qos_latency_tolerance_attr_group);
+		if (rc)
+			goto err_wakeup;
+	}
+	return 0;
+
+ err_wakeup:
+	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+ err_out:
+	sysfs_remove_group(&dev->kobj, &pm_attr_group);
+	return rc;
+}
+
+int wakeup_sysfs_add(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+}
+
+void wakeup_sysfs_remove(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+}
+
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+void pm_qos_sysfs_remove_flags(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj,
+				 &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
+void rpm_sysfs_remove(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+}
+
+void dpm_sysfs_remove(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+	dev_pm_qos_constraints_destroy(dev);
+	rpm_sysfs_remove(dev);
+	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+	sysfs_remove_group(&dev->kobj, &pm_attr_group);
+}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
new file mode 100644
index 0000000..1cda505
--- /dev/null
+++ b/drivers/base/power/trace.c
@@ -0,0 +1,293 @@
+/*
+ * drivers/base/power/trace.c
+ *
+ * Copyright (C) 2006 Linus Torvalds
+ *
+ * Trace facility for suspend/resume problems, when none of the
+ * devices may be working.
+ */
+
+#include <linux/pm-trace.h>
+#include <linux/export.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+
+#include <linux/mc146818rtc.h>
+
+#include "power.h"
+
+/*
+ * Horrid, horrid, horrid.
+ *
+ * It turns out that the _only_ piece of hardware that actually
+ * keeps its value across a hard boot (and, more importantly, the
+ * POST init sequence) is literally the realtime clock.
+ *
+ * Never mind that an RTC chip has 114 bytes (and often a whole
+ * other bank of an additional 128 bytes) of nice SRAM that is
+ * _designed_ to keep data - the POST will clear it. So we literally
+ * can just use the few bytes of actual time data, which means that
+ * we're really limited.
+ *
+ * It means, for example, that we can't use the seconds at all
+ * (since the time between the hang and the boot might be more
+ * than a minute), and we'd better not depend on the low bits of
+ * the minutes either.
+ *
+ * There are the wday fields etc, but I wouldn't guarantee those
+ * are dependable either. And if the date isn't valid, either the
+ * hw or POST will do strange things.
+ *
+ * So we're left with:
+ *  - year: 0-99
+ *  - month: 0-11
+ *  - day-of-month: 1-28
+ *  - hour: 0-23
+ *  - min: (0-19)*3
+ *
+ * Giving us a total range of 0-16128000 (0xf61800), ie less
+ * than 24 bits of actual data we can save across reboots.
+ *
+ * And if your box can't boot in less than three minutes,
+ * you're screwed.
+ *
+ * Now, almost 24 bits of data is pitifully small, so we need
+ * to be pretty dense if we want to use it for anything nice.
+ * What we do is that instead of saving off nice readable info,
+ * we save off _hashes_ of information that we can hopefully
+ * regenerate after the reboot.
+ *
+ * In particular, this means that we might be unlucky, and hit
+ * a case where we have a hash collision, and we end up not
+ * being able to tell for certain exactly which case happened.
+ * But that's hopefully unlikely.
+ *
+ * What we do is to take the bits we can fit, and split them
+ * into three parts (16*997*1009 = 16095568), and use the values
+ * for:
+ *  - 0-15: user-settable
+ *  - 0-996: file + line number
+ *  - 0-1008: device
+ */
+#define USERHASH (16)
+#define FILEHASH (997)
+#define DEVHASH (1009)
+
+#define DEVSEED (7919)
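+
+/*
+ * Worked example of the packing above (editor's note): user=3,
+ * file=500, dev=700 encode as
+ *
+ *	n = 3 + 16 * (500 + 997 * 700) = 3 + 16 * 698400 = 11174403
+ *
+ * set_magic_time() below then peels n apart digit by digit (year =
+ * n % 100, then month, day, hour, minute), read_magic_time()
+ * re-assembles the same value from the RTC fields, and
+ * late_resume_init() recovers user, file and dev with the inverse
+ * mod/div sequence.
+ */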
+
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
+static unsigned int dev_hash_value;
+
+static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
+{
+	unsigned int n = user + USERHASH*(file + FILEHASH*device);
+
+	// June 7th, 2006
+	static struct rtc_time time = {
+		.tm_sec = 0,
+		.tm_min = 0,
+		.tm_hour = 0,
+		.tm_mday = 7,
+		.tm_mon = 5,	// June - counting from zero
+		.tm_year = 106,
+		.tm_wday = 3,
+		.tm_yday = 160,
+		.tm_isdst = 1
+	};
+
+	time.tm_year = (n % 100);
+	n /= 100;
+	time.tm_mon = (n % 12);
+	n /= 12;
+	time.tm_mday = (n % 28) + 1;
+	n /= 28;
+	time.tm_hour = (n % 24);
+	n /= 24;
+	time.tm_min = (n % 20) * 3;
+	n /= 20;
+	mc146818_set_time(&time);
+	pm_trace_rtc_abused = true;
+	return n ? -1 : 0;
+}
+
+static unsigned int read_magic_time(void)
+{
+	struct rtc_time time;
+	unsigned int val;
+
+	mc146818_get_time(&time);
+	pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
+		time.tm_hour, time.tm_min, time.tm_sec,
+		time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
+	val = time.tm_year;				/* 100 years */
+	if (val > 100)
+		val -= 100;
+	val += time.tm_mon * 100;			/* 12 months */
+	val += (time.tm_mday-1) * 100 * 12;		/* 28 month-days */
+	val += time.tm_hour * 100 * 12 * 28;		/* 24 hours */
+	val += (time.tm_min / 3) * 100 * 12 * 28 * 24;	/* 20 3-minute intervals */
+	return val;
+}
+
+/*
+ * This is just the sdbm hash function with a user-supplied
+ * seed and final size parameter.
+ */
+static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
+{
+	unsigned char c;
+	while ((c = *data++) != 0) {
+		seed = (seed << 16) + (seed << 6) - seed + c;
+	}
+	return seed % mod;
+}
+
+void set_trace_device(struct device *dev)
+{
+	dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+}
+EXPORT_SYMBOL(set_trace_device);
+
+/*
+ * We could just take the "tracedata" index into the .tracedata
+ * section instead. Generating a hash of the data gives us a
+ * chance to work across kernel versions, and perhaps more
+ * importantly it also gives us valid/invalid check (ie we will
+ * likely not give totally bogus reports - if the hash matches,
+ * it's not any guarantee, but it's a high _likelihood_ that
+ * the match is valid).
+ */
+void generate_pm_trace(const void *tracedata, unsigned int user)
+{
+	unsigned short lineno = *(unsigned short *)tracedata;
+	const char *file = *(const char **)(tracedata + 2);
+	unsigned int user_hash_value, file_hash_value;
+
+	user_hash_value = user % USERHASH;
+	file_hash_value = hash_string(lineno, file, FILEHASH);
+	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+}
+EXPORT_SYMBOL(generate_pm_trace);
+
+extern char __tracedata_start[], __tracedata_end[];
+static int show_file_hash(unsigned int value)
+{
+	int match;
+	char *tracedata;
+
+	match = 0;
+	for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
+			tracedata += 2 + sizeof(unsigned long)) {
+		unsigned short lineno = *(unsigned short *)tracedata;
+		const char *file = *(const char **)(tracedata + 2);
+		unsigned int hash = hash_string(lineno, file, FILEHASH);
+		if (hash != value)
+			continue;
+		pr_info("  hash matches %s:%u\n", file, lineno);
+		match++;
+	}
+	return match;
+}
+
+static int show_dev_hash(unsigned int value)
+{
+	int match = 0;
+	struct list_head *entry;
+
+	device_pm_lock();
+	entry = dpm_list.prev;
+	while (entry != &dpm_list) {
+		struct device *dev = to_device(entry);
+		unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+		if (hash == value) {
+			dev_info(dev, "hash matches\n");
+			match++;
+		}
+		entry = entry->prev;
+	}
+	device_pm_unlock();
+	return match;
+}
+
+static unsigned int hash_value_early_read;
+
+int show_trace_dev_match(char *buf, size_t size)
+{
+	unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
+	int ret = 0;
+	struct list_head *entry;
+
+	/*
+	 * It's possible that multiple devices will match the hash and we can't
+	 * tell which is the culprit, so it's best to output them all.
+	 */
+	device_pm_lock();
+	entry = dpm_list.prev;
+	while (size && entry != &dpm_list) {
+		struct device *dev = to_device(entry);
+		unsigned int hash = hash_string(DEVSEED, dev_name(dev),
+						DEVHASH);
+		if (hash == value) {
+			int len = snprintf(buf, size, "%s\n",
+					    dev_driver_string(dev));
+			if (len > size)
+				len = size;
+			buf += len;
+			ret += len;
+			size -= len;
+		}
+		entry = entry->prev;
+	}
+	device_pm_unlock();
+	return ret;
+}
+
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+	switch (mode) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		if (pm_trace_rtc_abused) {
+			pm_trace_rtc_abused = false;
+			pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+	.notifier_call = pm_trace_notify,
+};
+
+static int early_resume_init(void)
+{
+	hash_value_early_read = read_magic_time();
+	register_pm_notifier(&pm_trace_nb);
+	return 0;
+}
+
+static int late_resume_init(void)
+{
+	unsigned int val = hash_value_early_read;
+	unsigned int user, file, dev;
+
+	user = val % USERHASH;
+	val = val / USERHASH;
+	file = val % FILEHASH;
+	val = val / FILEHASH;
+	dev = val /* % DEVHASH */;
+
+	pr_info("  Magic number: %d:%d:%d\n", user, file, dev);
+	show_file_hash(file);
+	show_dev_hash(dev);
+	return 0;
+}
+
+core_initcall(early_resume_init);
+late_initcall(late_resume_init);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
new file mode 100644
index 0000000..b8fa5c0
--- /dev/null
+++ b/drivers/base/power/wakeirq.c
@@ -0,0 +1,360 @@
+/*
+ * wakeirq.c - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
+ * @dev: Device entry
+ * @irq: Device wake-up capable interrupt
+ * @wirq: Wake irq specific data
+ *
+ * Internal function to attach either a device IO interrupt or a
+ * dedicated wake-up interrupt as a wake IRQ.
+ */
+static int dev_pm_attach_wake_irq(struct device *dev, int irq,
+				  struct wake_irq *wirq)
+{
+	unsigned long flags;
+
+	if (!dev || !wirq)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
+			  "wake irq already initialized\n")) {
+		spin_unlock_irqrestore(&dev->power.lock, flags);
+		return -EEXIST;
+	}
+
+	dev->power.wakeirq = wirq;
+	device_wakeup_attach_irq(dev, wirq);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+	return 0;
+}
+
+/**
+ * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
+ * automatically configured for wake-up from suspend based
+ * on the device-specific sysfs wakeup entry. Typically called
+ * during driver probe after calling device_init_wakeup().
+ */
+int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+	struct wake_irq *wirq;
+	int err;
+
+	if (irq < 0)
+		return -EINVAL;
+
+	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+	if (!wirq)
+		return -ENOMEM;
+
+	wirq->dev = dev;
+	wirq->irq = irq;
+
+	err = dev_pm_attach_wake_irq(dev, irq, wirq);
+	if (err)
+		kfree(wirq);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
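+
+/*
+ * A minimal probe-time usage sketch for the helper above (editor's
+ * example; "foo" and the error handling are illustrative, not taken
+ * from an existing driver):
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int irq = platform_get_irq(pdev, 0);
+ *		int err;
+ *
+ *		if (irq < 0)
+ *			return irq;
+ *
+ *		device_init_wakeup(&pdev->dev, true);
+ *		err = dev_pm_set_wake_irq(&pdev->dev, irq);
+ *		if (err)
+ *			device_init_wakeup(&pdev->dev, false);
+ *
+ *		return err;
+ *	}
+ */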
+
+/**
+ * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
+ * @dev: Device entry
+ *
+ * Detach a device wake IRQ and free resources.
+ *
+ * Note that it's OK for drivers to call this without calling
+ * dev_pm_set_wake_irq(), as not all driver instances may have
+ * a wake IRQ configured. This avoids adding wake-IRQ-specific
+ * checks to drivers.
+ */
+void dev_pm_clear_wake_irq(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+	unsigned long flags;
+
+	if (!wirq)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	device_wakeup_detach_irq(dev);
+	dev->power.wakeirq = NULL;
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
+		free_irq(wirq->irq, wirq);
+		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+	}
+	kfree(wirq->name);
+	kfree(wirq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+
+/**
+ * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
+ * @irq: Device specific dedicated wake-up interrupt
+ * @_wirq: Wake IRQ data
+ *
+ * Some devices have a separate wake-up interrupt in addition to the
+ * device IO interrupt. The wake-up interrupt signals that a device
+ * should be woken up from its idle state. This handler uses
+ * device-specific pm_runtime functions to wake the device, and then
+ * it's up to the device to do whatever it needs to. Note that as the
+ * device may need to restore context and start up regulators, we
+ * use a threaded IRQ.
+ *
+ * Also note that we are not resending the lost device interrupts.
+ * We assume that the wake-up interrupt just needs to wake up the
+ * device, and then the device's pm_runtime_resume() can deal with the
+ * situation.
+ */
+static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+{
+	struct wake_irq *wirq = _wirq;
+	int res;
+
+	/* Maybe abort suspend? */
+	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+		pm_wakeup_event(wirq->dev, 0);
+
+		return IRQ_HANDLED;
+	}
+
+	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
+	res = pm_runtime_resume(wirq->dev);
+	if (res < 0)
+		dev_warn(wirq->dev,
+			 "wake IRQ with no resume: %i\n", res);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+	struct wake_irq *wirq;
+	int err;
+
+	if (irq < 0)
+		return -EINVAL;
+
+	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+	if (!wirq)
+		return -ENOMEM;
+
+	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
+	if (!wirq->name) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	wirq->dev = dev;
+	wirq->irq = irq;
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+	/*
+	 * Consumer device may need to power up and restore state
+	 * so we use a threaded irq.
+	 */
+	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
+				   IRQF_ONESHOT, wirq->name, wirq);
+	if (err)
+		goto err_free_name;
+
+	err = dev_pm_attach_wake_irq(dev, irq, wirq);
+	if (err)
+		goto err_free_irq;
+
+	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
+	return err;
+
+err_free_irq:
+	free_irq(irq, wirq);
+err_free_name:
+	kfree(wirq->name);
+err_free:
+	kfree(wirq);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
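+
+/*
+ * For drivers that manage the dedicated wake IRQ themselves rather
+ * than leaving it to the PM core, the pairing described above would
+ * look roughly like this (editor's sketch; the "foo" helpers are
+ * hypothetical):
+ *
+ *	static int foo_runtime_suspend(struct device *dev)
+ *	{
+ *		foo_save_context(dev);		// hypothetical
+ *		dev_pm_enable_wake_irq(dev);	// let the device wake us
+ *		return 0;
+ *	}
+ *
+ *	static int foo_runtime_resume(struct device *dev)
+ *	{
+ *		dev_pm_disable_wake_irq(dev);
+ *		foo_restore_context(dev);	// hypothetical
+ *		return 0;
+ *	}
+ */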
+
+/**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
+ *
+ * Note that for runtime_suspend() the wake-up interrupt should be
+ * enabled unconditionally, unlike for system suspend(), where arming
+ * is conditional on device_may_wakeup().
+ */
+void dev_pm_enable_wake_irq(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+		enable_irq(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+
+/**
+ * dev_pm_disable_wake_irq - Disable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
+ */
+void dev_pm_disable_wake_irq(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+		disable_irq_nosync(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+
+/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable the wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up
+ * interrupt starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+				  bool can_change_status)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+		goto enable;
+	} else if (can_change_status) {
+		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+		goto enable;
+	}
+
+	return;
+
+enable:
+	enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+		disable_irq_nosync(wirq->irq);
+}
+
+/**
+ * dev_pm_arm_wake_irq - Arm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Sets up the wake-up event conditionally based on
+ * device_may_wakeup().
+ */
+void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+	if (!wirq)
+		return;
+
+	if (device_may_wakeup(wirq->dev)) {
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+		    !pm_runtime_status_suspended(wirq->dev))
+			enable_irq(wirq->irq);
+
+		enable_irq_wake(wirq->irq);
+	}
+}
+
+/**
+ * dev_pm_disarm_wake_irq - Disarm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Clears up the wake-up event conditionally based on
+ * device_may_wakeup().
+ */
+void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+{
+	if (!wirq)
+		return;
+
+	if (device_may_wakeup(wirq->dev)) {
+		disable_irq_wake(wirq->irq);
+
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+		    !pm_runtime_status_suspended(wirq->dev))
+			disable_irq_nosync(wirq->irq);
+	}
+}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 0000000..5fa1898
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,1117 @@
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/capability.h>
+#include <linux/export.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state	(PM_SUSPEND_ON)
+#endif
+
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled __read_mostly;
+
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+unsigned int pm_wakeup_irq __read_mostly;
+
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
+
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS	(sizeof(int) * 4)
+#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+	unsigned int comb = atomic_read(&combined_event_count);
+
+	*cnt = (comb >> IN_PROGRESS_BITS);
+	*inpr = comb & MAX_IN_PROGRESS;
+}
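+
+/*
+ * Editor's note: with a 32-bit int, IN_PROGRESS_BITS is 16, so bits
+ * 31..16 hold the count of registered events and bits 15..0 the count
+ * of events in progress.  For example, 0x00050002 splits into cnt = 5,
+ * inpr = 2.  Deactivating a wakeup source adds MAX_IN_PROGRESS
+ * (0xffff, i.e. 0x10000 - 1), which increments the registered count
+ * and decrements the in-progress count in a single atomic operation:
+ * 0x00050002 + 0xffff = 0x00060001.
+ */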
+
+/* A preserved old value of the events counter. */
+static unsigned int saved_count;
+
+static DEFINE_RAW_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(struct timer_list *t);
+
+static LIST_HEAD(wakeup_sources);
+
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
+static struct wakeup_source deleted_ws = {
+	.name = "deleted",
+	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
+};
+
+/**
+ * wakeup_source_prepare - Prepare a new wakeup source for initialization.
+ * @ws: Wakeup source to prepare.
+ * @name: Pointer to the name of the new wakeup source.
+ *
+ * Callers must ensure that the @name string won't be freed when @ws is still in
+ * use.
+ */
+void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
+{
+	if (ws) {
+		memset(ws, 0, sizeof(*ws));
+		ws->name = name;
+	}
+}
+EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+	struct wakeup_source *ws;
+
+	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
+	if (!ws)
+		return NULL;
+
+	wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/**
+ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
+ * @ws: Wakeup source to prepare for destruction.
+ *
+ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+ * be run in parallel with this function for the same wakeup source object.
+ */
+void wakeup_source_drop(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	del_timer_sync(&ws->timer);
+	__pm_relax(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_drop);
+
+/*
+ * Record the statistics of a wakeup_source being deleted in the dummy
+ * "deleted" wakeup_source.
+ */
+static void wakeup_source_record(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&deleted_ws.lock, flags);
+
+	if (ws->event_count) {
+		deleted_ws.total_time =
+			ktime_add(deleted_ws.total_time, ws->total_time);
+		deleted_ws.prevent_sleep_time =
+			ktime_add(deleted_ws.prevent_sleep_time,
+				  ws->prevent_sleep_time);
+		deleted_ws.max_time =
+			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
+				deleted_ws.max_time : ws->max_time;
+		deleted_ws.event_count += ws->event_count;
+		deleted_ws.active_count += ws->active_count;
+		deleted_ws.relax_count += ws->relax_count;
+		deleted_ws.expire_count += ws->expire_count;
+		deleted_ws.wakeup_count += ws->wakeup_count;
+	}
+
+	spin_unlock_irqrestore(&deleted_ws.lock, flags);
+}
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+	if (!ws)
+		return;
+
+	wakeup_source_drop(ws);
+	wakeup_source_record(ws);
+	kfree_const(ws->name);
+	kfree(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!ws))
+		return;
+
+	spin_lock_init(&ws->lock);
+	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
+	ws->active = false;
+
+	raw_spin_lock_irqsave(&events_lock, flags);
+	list_add_rcu(&ws->entry, &wakeup_sources);
+	raw_spin_unlock_irqrestore(&events_lock, flags);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!ws))
+		return;
+
+	raw_spin_lock_irqsave(&events_lock, flags);
+	list_del_rcu(&ws->entry);
+	raw_spin_unlock_irqrestore(&events_lock, flags);
+	synchronize_srcu(&wakeup_srcu);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(const char *name)
+{
+	struct wakeup_source *ws;
+
+	ws = wakeup_source_create(name);
+	if (ws)
+		wakeup_source_add(ws);
+
+	return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+	if (ws) {
+		wakeup_source_remove(ws);
+		wakeup_source_destroy(ws);
+	}
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
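+
+/*
+ * Typical lifecycle of a standalone wakeup source using the helpers
+ * above (editor's sketch; "foo" is an illustrative name):
+ *
+ *	struct wakeup_source *ws = wakeup_source_register("foo");
+ *
+ *	__pm_stay_awake(ws);	// block system suspend while working
+ *	// ... process the wakeup event ...
+ *	__pm_relax(ws);		// allow system suspend again
+ *
+ *	wakeup_source_unregister(ws);
+ */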
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+	spin_lock_irq(&dev->power.lock);
+	if (dev->power.wakeup) {
+		spin_unlock_irq(&dev->power.lock);
+		return -EEXIST;
+	}
+	dev->power.wakeup = ws;
+	if (dev->power.wakeirq)
+		device_wakeup_attach_irq(dev, dev->power.wakeirq);
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+	struct wakeup_source *ws;
+	int ret;
+
+	if (!dev || !dev->power.can_wakeup)
+		return -EINVAL;
+
+	if (pm_suspend_target_state != PM_SUSPEND_ON)
+		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
+	ws = wakeup_source_register(dev_name(dev));
+	if (!ws)
+		return -ENOMEM;
+
+	ret = device_wakeup_attach(dev, ws);
+	if (ret)
+		wakeup_source_unregister(ws);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
+ * @dev: Device to handle
+ * @wakeirq: Device specific wakeirq entry
+ *
+ * Attach a device wakeirq to the wakeup source so the device
+ * wake IRQ can be configured automatically for suspend and
+ * resume.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_attach_irq(struct device *dev,
+			     struct wake_irq *wakeirq)
+{
+	struct wakeup_source *ws;
+
+	ws = dev->power.wakeup;
+	if (!ws)
+		return;
+
+	if (ws->wakeirq)
+		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
+
+	ws->wakeirq = wakeirq;
+}
+
+/**
+ * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
+ * @dev: Device to handle
+ *
+ * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_detach_irq(struct device *dev)
+{
+	struct wakeup_source *ws;
+
+	ws = dev->power.wakeup;
+	if (ws)
+		ws->wakeirq = NULL;
+}
+
+/**
+ * device_wakeup_arm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to arm them.
+ */
+void device_wakeup_arm_wake_irqs(void)
+{
+	struct wakeup_source *ws;
+	int srcuidx;
+
+	srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_arm_wake_irq(ws->wakeirq);
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_disarm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to disarm them.
+ */
+void device_wakeup_disarm_wake_irqs(void)
+{
+	struct wakeup_source *ws;
+	int srcuidx;
+
+	srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_disarm_wake_irq(ws->wakeirq);
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+	struct wakeup_source *ws;
+
+	spin_lock_irq(&dev->power.lock);
+	ws = dev->power.wakeup;
+	dev->power.wakeup = NULL;
+	spin_unlock_irq(&dev->power.lock);
+	return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+	struct wakeup_source *ws;
+
+	if (!dev || !dev->power.can_wakeup)
+		return -EINVAL;
+
+	ws = device_wakeup_detach(dev);
+	wakeup_source_unregister(ws);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep, so it must not be called from any context
+ * where sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+	if (!!dev->power.can_wakeup == !!capable)
+		return;
+
+	dev->power.can_wakeup = capable;
+	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
+		if (capable) {
+			int ret = wakeup_sysfs_add(dev);
+
+			if (ret)
+				dev_info(dev, "Wakeup sysfs attributes not added\n");
+		} else {
+			wakeup_sysfs_remove(dev);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled.  The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc.  Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+int device_init_wakeup(struct device *dev, bool enable)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (enable) {
+		device_set_wakeup_capable(dev, true);
+		ret = device_wakeup_enable(dev);
+	} else {
+		device_wakeup_disable(dev);
+		device_set_wakeup_capable(dev, false);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(device_init_wakeup);
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+	/*
+	 * Use timer struct to check if the given source is initialized
+	 * by wakeup_source_add.
+	 */
+	return ws->timer.function != pm_wakeup_timer_fn;
+}
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended.  The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing.  In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one.  In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing.  This
+ * knowledge, however, may not be available to it, so it can simply specify a time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
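+
+/*
+ * The two patterns above as a rough sketch for a hypothetical driver
+ * (editor's example; the "foo"/"bar" helpers are illustrative):
+ *
+ *	// Case 1: the event is detected and fully processed locally.
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct device *dev = data;
+ *
+ *		pm_stay_awake(dev);
+ *		foo_handle_event(dev);		// hypothetical
+ *		pm_relax(dev);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	// Case 2: the event is handed off; give the consumer 100 ms.
+ *	static irqreturn_t bar_irq(int irq, void *data)
+ *	{
+ *		struct device *dev = data;
+ *
+ *		pm_wakeup_event(dev, 100);
+ *		bar_queue_for_consumer(dev);	// hypothetical
+ *		return IRQ_HANDLED;
+ *	}
+ */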
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+	unsigned int cec;
+
+	if (WARN_ONCE(wakeup_source_not_registered(ws),
+			"unregistered wakeup source\n"))
+		return;
+
+	ws->active = true;
+	ws->active_count++;
+	ws->last_time = ktime_get();
+	if (ws->autosleep_enabled)
+		ws->start_prevent_time = ws->last_time;
+
+	/* Increment the counter of events in progress. */
+	cec = atomic_inc_return(&combined_event_count);
+
+	trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
+{
+	ws->event_count++;
+	/* This is racy, but the counter is approximate anyway. */
+	if (events_check_enabled)
+		ws->wakeup_count++;
+
+	if (!ws->active)
+		wakeup_source_activate(ws);
+
+	if (hard)
+		pm_system_wakeup();
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	wakeup_source_report_event(ws, false);
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_stay_awake(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+					     ktime_t now) {}
+#endif
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+	unsigned int cnt, inpr, cec;
+	ktime_t duration;
+	ktime_t now;
+
+	ws->relax_count++;
+	/*
+	 * __pm_relax() may be called directly or from a timer function.
+	 * If it is called directly right after the timer function has been
+	 * started, but before the timer function calls __pm_relax(), it is
+	 * possible that __pm_stay_awake() will be called in the meantime and
+	 * will set ws->active.  Then, ws->active may be cleared immediately
+	 * by the __pm_relax() called from the timer function, but in such a
+	 * case ws->relax_count will be different from ws->active_count.
+	 */
+	if (ws->relax_count != ws->active_count) {
+		ws->relax_count--;
+		return;
+	}
+
+	ws->active = false;
+
+	now = ktime_get();
+	duration = ktime_sub(now, ws->last_time);
+	ws->total_time = ktime_add(ws->total_time, duration);
+	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+		ws->max_time = duration;
+
+	ws->last_time = now;
+	del_timer(&ws->timer);
+	ws->timer_expires = 0;
+
+	if (ws->autosleep_enabled)
+		update_prevent_sleep_time(ws, now);
+
+	/*
+	 * Increment the counter of registered wakeup events and decrement the
+	 * counter of wakeup events in progress simultaneously.
+	 */
+	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+	trace_wakeup_source_deactivate(ws->name, cec);
+
+	split_counters(&cnt, &inpr);
+	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+		wake_up(&wakeup_count_wait_queue);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * __pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void __pm_relax(struct wakeup_source *ws)
+{
+	unsigned long flags;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+	if (ws->active)
+		wakeup_source_deactivate(ws);
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	__pm_relax(dev->power.wakeup);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @t: Timer used to look up the wakeup source to handle.
+ *
+ * Call wakeup_source_deactivate() for the wakeup source associated with the
+ * timer @t if it is currently active, its timer has not been canceled, and
+ * the expiration time of the timer is not in the future.
+ */
+static void pm_wakeup_timer_fn(struct timer_list *t)
+{
+	struct wakeup_source *ws = from_timer(ws, t, timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	if (ws->active && ws->timer_expires
+	    && time_after_eq(jiffies, ws->timer_expires)) {
+		wakeup_source_deactivate(ws);
+		ws->expire_count++;
+	}
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+
+/**
+ * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel.  If @ws is
+ * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in the future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
+{
+	unsigned long flags;
+	unsigned long expires;
+
+	if (!ws)
+		return;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	wakeup_source_report_event(ws, hard);
+
+	if (!msec) {
+		wakeup_source_deactivate(ws);
+		goto unlock;
+	}
+
+	expires = jiffies + msecs_to_jiffies(msec);
+	if (!expires)
+		expires = 1;
+
+	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
+		mod_timer(&ws->timer, expires);
+		ws->timer_expires = expires;
+	}
+
+ unlock:
+	spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
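+
+/*
+ * Illustrative usage sketch: report a wakeup event on "ws" that is expected
+ * to take roughly 50 ms to process.  The event is finalized automatically by
+ * the timer, so no matching __pm_relax() call is needed:
+ *
+ *	pm_wakeup_ws_event(ws, 50, false);
+ */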
+
+/**
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
+ */
+void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
+{
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
+
+void pm_print_active_wakeup_sources(void)
+{
+	struct wakeup_source *ws;
+	int srcuidx, active = 0;
+	struct wakeup_source *last_activity_ws = NULL;
+
+	srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active) {
+			pr_debug("active wakeup source: %s\n", ws->name);
+			active = 1;
+		} else if (!active &&
+			   (!last_activity_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_activity_ws->last_time))) {
+			last_activity_ws = ws;
+		}
+	}
+
+	if (!active && last_activity_ws)
+		pr_debug("last active wakeup source: %s\n",
+			last_activity_ws->name);
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
+
+/**
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored.  Also return true if the current number of
+ * wakeup events being processed is different from zero.
+ */
+bool pm_wakeup_pending(void)
+{
+	unsigned long flags;
+	bool ret = false;
+
+	raw_spin_lock_irqsave(&events_lock, flags);
+	if (events_check_enabled) {
+		unsigned int cnt, inpr;
+
+		split_counters(&cnt, &inpr);
+		ret = (cnt != saved_count || inpr > 0);
+		events_check_enabled = !ret;
+	}
+	raw_spin_unlock_irqrestore(&events_lock, flags);
+
+	if (ret) {
+		pr_debug("PM: Wakeup pending, aborting suspend\n");
+		pm_print_active_wakeup_sources();
+	}
+
+	return ret || atomic_read(&pm_abort_suspend) > 0;
+}
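+
+/*
+ * Illustrative usage sketch: a suspend code path would consult
+ * pm_wakeup_pending() before committing to a sleep state and abort the
+ * transition if new wakeup events have come in:
+ *
+ *	if (pm_wakeup_pending())
+ *		return -EBUSY;
+ */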
+
+void pm_system_wakeup(void)
+{
+	atomic_inc(&pm_abort_suspend);
+	s2idle_wake();
+}
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
+
+void pm_system_cancel_wakeup(void)
+{
+	atomic_dec(&pm_abort_suspend);
+}
+
+void pm_wakeup_clear(bool reset)
+{
+	pm_wakeup_irq = 0;
+	if (reset)
+		atomic_set(&pm_abort_suspend, 0);
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+	if (pm_wakeup_irq == 0) {
+		pm_wakeup_irq = irq_number;
+		pm_system_wakeup();
+	}
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ * @block: Whether or not to block.
+ *
+ * Store the number of registered wakeup events at the address in @count.  If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
+ *
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero.  Otherwise return 'true'.
+ */
+bool pm_get_wakeup_count(unsigned int *count, bool block)
+{
+	unsigned int cnt, inpr;
+
+	if (block) {
+		DEFINE_WAIT(wait);
+
+		for (;;) {
+			prepare_to_wait(&wakeup_count_wait_queue, &wait,
+					TASK_INTERRUPTIBLE);
+			split_counters(&cnt, &inpr);
+			if (inpr == 0 || signal_pending(current))
+				break;
+			pm_print_active_wakeup_sources();
+			schedule();
+		}
+		finish_wait(&wakeup_count_wait_queue, &wait);
+	}
+
+	split_counters(&cnt, &inpr);
+	*count = cnt;
+	return !inpr;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events for pm_wakeup_pending(), enable
+ * wakeup event detection and return 'true'.  Otherwise disable wakeup event
+ * detection and return 'false'.
+ */
+bool pm_save_wakeup_count(unsigned int count)
+{
+	unsigned int cnt, inpr;
+	unsigned long flags;
+
+	events_check_enabled = false;
+	raw_spin_lock_irqsave(&events_lock, flags);
+	split_counters(&cnt, &inpr);
+	if (cnt == count && inpr == 0) {
+		saved_count = count;
+		events_check_enabled = true;
+	}
+	raw_spin_unlock_irqrestore(&events_lock, flags);
+	return events_check_enabled;
+}
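+
+/*
+ * Illustrative usage sketch of the two calls above, implementing a
+ * wakeup-count handshake: read the count (optionally blocking until no
+ * events are in progress), then re-check and arm detection before sleeping.
+ * enter_sleep_state() is a placeholder.
+ *
+ *	unsigned int cnt;
+ *
+ *	if (pm_get_wakeup_count(&cnt, true) && pm_save_wakeup_count(cnt))
+ *		enter_sleep_state();
+ */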
+
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+	struct wakeup_source *ws;
+	ktime_t now = ktime_get();
+	int srcuidx;
+
+	srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		spin_lock_irq(&ws->lock);
+		if (ws->autosleep_enabled != set) {
+			ws->autosleep_enabled = set;
+			if (ws->active) {
+				if (set)
+					ws->start_prevent_time = now;
+				else
+					update_prevent_sleep_time(ws, now);
+			}
+		}
+		spin_unlock_irq(&ws->lock);
+	}
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
+static struct dentry *wakeup_sources_stats_dentry;
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+				     struct wakeup_source *ws)
+{
+	unsigned long flags;
+	ktime_t total_time;
+	ktime_t max_time;
+	unsigned long active_count;
+	ktime_t active_time;
+	ktime_t prevent_sleep_time;
+
+	spin_lock_irqsave(&ws->lock, flags);
+
+	total_time = ws->total_time;
+	max_time = ws->max_time;
+	prevent_sleep_time = ws->prevent_sleep_time;
+	active_count = ws->active_count;
+	if (ws->active) {
+		ktime_t now = ktime_get();
+
+		active_time = ktime_sub(now, ws->last_time);
+		total_time = ktime_add(total_time, active_time);
+		if (active_time > max_time)
+			max_time = active_time;
+
+		if (ws->autosleep_enabled)
+			prevent_sleep_time = ktime_add(prevent_sleep_time,
+				ktime_sub(now, ws->start_prevent_time));
+	} else {
+		active_time = 0;
+	}
+
+	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+		   ws->name, active_count, ws->event_count,
+		   ws->wakeup_count, ws->expire_count,
+		   ktime_to_ms(active_time), ktime_to_ms(total_time),
+		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+		   ktime_to_ms(prevent_sleep_time));
+
+	spin_unlock_irqrestore(&ws->lock, flags);
+
+	return 0;
+}
+
+static void *wakeup_sources_stats_seq_start(struct seq_file *m,
+					loff_t *pos)
+{
+	struct wakeup_source *ws;
+	loff_t n = *pos;
+	int *srcuidx = m->private;
+
+	if (n == 0) {
+		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+			"expire_count\tactive_since\ttotal_time\tmax_time\t"
+			"last_change\tprevent_suspend_time\n");
+	}
+
+	*srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (n-- <= 0)
+			return ws;
+	}
+
+	return NULL;
+}
+
+static void *wakeup_sources_stats_seq_next(struct seq_file *m,
+					void *v, loff_t *pos)
+{
+	struct wakeup_source *ws = v;
+	struct wakeup_source *next_ws = NULL;
+
+	++(*pos);
+
+	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
+		next_ws = ws;
+		break;
+	}
+
+	return next_ws;
+}
+
+static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
+{
+	int *srcuidx = m->private;
+
+	srcu_read_unlock(&wakeup_srcu, *srcuidx);
+}
+
+/**
+ * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ * @v: wakeup_source of each iteration
+ */
+static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
+{
+	struct wakeup_source *ws = v;
+
+	print_wakeup_source_stats(m, ws);
+
+	return 0;
+}
+
+static const struct seq_operations wakeup_sources_stats_seq_ops = {
+	.start = wakeup_sources_stats_seq_start,
+	.next  = wakeup_sources_stats_seq_next,
+	.stop  = wakeup_sources_stats_seq_stop,
+	.show  = wakeup_sources_stats_seq_show,
+};
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = wakeup_sources_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
+			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+	return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
diff --git a/drivers/base/property.c b/drivers/base/property.c
new file mode 100644
index 0000000..240ab52
--- /dev/null
+++ b/drivers/base/property.c
@@ -0,0 +1,1485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * property.c - Unified device property interface.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+
+struct property_set {
+	struct device *dev;
+	struct fwnode_handle fwnode;
+	const struct property_entry *properties;
+};
+
+static const struct fwnode_operations pset_fwnode_ops;
+
+static inline bool is_pset_node(const struct fwnode_handle *fwnode)
+{
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
+}
+
+#define to_pset_node(__fwnode)						\
+	({								\
+		typeof(__fwnode) __to_pset_node_fwnode = __fwnode;	\
+									\
+		is_pset_node(__to_pset_node_fwnode) ?			\
+			container_of(__to_pset_node_fwnode,		\
+				     struct property_set, fwnode) :	\
+			NULL;						\
+	})
+
+static const struct property_entry *
+pset_prop_get(const struct property_set *pset, const char *name)
+{
+	const struct property_entry *prop;
+
+	if (!pset || !pset->properties)
+		return NULL;
+
+	for (prop = pset->properties; prop->name; prop++)
+		if (!strcmp(name, prop->name))
+			return prop;
+
+	return NULL;
+}
+
+static const void *property_get_pointer(const struct property_entry *prop)
+{
+	switch (prop->type) {
+	case DEV_PROP_U8:
+		if (prop->is_array)
+			return prop->pointer.u8_data;
+		return &prop->value.u8_data;
+	case DEV_PROP_U16:
+		if (prop->is_array)
+			return prop->pointer.u16_data;
+		return &prop->value.u16_data;
+	case DEV_PROP_U32:
+		if (prop->is_array)
+			return prop->pointer.u32_data;
+		return &prop->value.u32_data;
+	case DEV_PROP_U64:
+		if (prop->is_array)
+			return prop->pointer.u64_data;
+		return &prop->value.u64_data;
+	case DEV_PROP_STRING:
+		if (prop->is_array)
+			return prop->pointer.str;
+		return &prop->value.str;
+	default:
+		return NULL;
+	}
+}
+
+static void property_set_pointer(struct property_entry *prop, const void *pointer)
+{
+	switch (prop->type) {
+	case DEV_PROP_U8:
+		if (prop->is_array)
+			prop->pointer.u8_data = pointer;
+		else
+			prop->value.u8_data = *((u8 *)pointer);
+		break;
+	case DEV_PROP_U16:
+		if (prop->is_array)
+			prop->pointer.u16_data = pointer;
+		else
+			prop->value.u16_data = *((u16 *)pointer);
+		break;
+	case DEV_PROP_U32:
+		if (prop->is_array)
+			prop->pointer.u32_data = pointer;
+		else
+			prop->value.u32_data = *((u32 *)pointer);
+		break;
+	case DEV_PROP_U64:
+		if (prop->is_array)
+			prop->pointer.u64_data = pointer;
+		else
+			prop->value.u64_data = *((u64 *)pointer);
+		break;
+	case DEV_PROP_STRING:
+		if (prop->is_array)
+			prop->pointer.str = pointer;
+		else
+			prop->value.str = pointer;
+		break;
+	default:
+		break;
+	}
+}
+
+static const void *pset_prop_find(const struct property_set *pset,
+				  const char *propname, size_t length)
+{
+	const struct property_entry *prop;
+	const void *pointer;
+
+	prop = pset_prop_get(pset, propname);
+	if (!prop)
+		return ERR_PTR(-EINVAL);
+	pointer = property_get_pointer(prop);
+	if (!pointer)
+		return ERR_PTR(-ENODATA);
+	if (length > prop->length)
+		return ERR_PTR(-EOVERFLOW);
+	return pointer;
+}
+
+static int pset_prop_read_u8_array(const struct property_set *pset,
+				   const char *propname,
+				   u8 *values, size_t nval)
+{
+	const void *pointer;
+	size_t length = nval * sizeof(*values);
+
+	pointer = pset_prop_find(pset, propname, length);
+	if (IS_ERR(pointer))
+		return PTR_ERR(pointer);
+
+	memcpy(values, pointer, length);
+	return 0;
+}
+
+static int pset_prop_read_u16_array(const struct property_set *pset,
+				    const char *propname,
+				    u16 *values, size_t nval)
+{
+	const void *pointer;
+	size_t length = nval * sizeof(*values);
+
+	pointer = pset_prop_find(pset, propname, length);
+	if (IS_ERR(pointer))
+		return PTR_ERR(pointer);
+
+	memcpy(values, pointer, length);
+	return 0;
+}
+
+static int pset_prop_read_u32_array(const struct property_set *pset,
+				    const char *propname,
+				    u32 *values, size_t nval)
+{
+	const void *pointer;
+	size_t length = nval * sizeof(*values);
+
+	pointer = pset_prop_find(pset, propname, length);
+	if (IS_ERR(pointer))
+		return PTR_ERR(pointer);
+
+	memcpy(values, pointer, length);
+	return 0;
+}
+
+static int pset_prop_read_u64_array(const struct property_set *pset,
+				    const char *propname,
+				    u64 *values, size_t nval)
+{
+	const void *pointer;
+	size_t length = nval * sizeof(*values);
+
+	pointer = pset_prop_find(pset, propname, length);
+	if (IS_ERR(pointer))
+		return PTR_ERR(pointer);
+
+	memcpy(values, pointer, length);
+	return 0;
+}
+
+static int pset_prop_count_elems_of_size(const struct property_set *pset,
+					 const char *propname, size_t length)
+{
+	const struct property_entry *prop;
+
+	prop = pset_prop_get(pset, propname);
+	if (!prop)
+		return -EINVAL;
+
+	return prop->length / length;
+}
+
+static int pset_prop_read_string_array(const struct property_set *pset,
+				       const char *propname,
+				       const char **strings, size_t nval)
+{
+	const struct property_entry *prop;
+	const void *pointer;
+	size_t array_len, length;
+
+	/* Find out the array length. */
+	prop = pset_prop_get(pset, propname);
+	if (!prop)
+		return -EINVAL;
+
+	if (!prop->is_array)
+		/* The array length for a non-array string property is 1. */
+		array_len = 1;
+	else
+		/* Find the length of an array. */
+		array_len = pset_prop_count_elems_of_size(pset, propname,
+							  sizeof(const char *));
+
+	/* Return how many there are if strings is NULL. */
+	if (!strings)
+		return array_len;
+
+	array_len = min(nval, array_len);
+	length = array_len * sizeof(*strings);
+
+	pointer = pset_prop_find(pset, propname, length);
+	if (IS_ERR(pointer))
+		return PTR_ERR(pointer);
+
+	memcpy(strings, pointer, length);
+
+	return array_len;
+}
+
+struct fwnode_handle *dev_fwnode(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+		&dev->of_node->fwnode : dev->fwnode;
+}
+EXPORT_SYMBOL_GPL(dev_fwnode);
+
+static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode,
+					 const char *propname)
+{
+	return !!pset_prop_get(to_pset_node(fwnode), propname);
+}
+
+static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode,
+				      const char *propname,
+				      unsigned int elem_size, void *val,
+				      size_t nval)
+{
+	const struct property_set *node = to_pset_node(fwnode);
+
+	if (!val)
+		return pset_prop_count_elems_of_size(node, propname, elem_size);
+
+	switch (elem_size) {
+	case sizeof(u8):
+		return pset_prop_read_u8_array(node, propname, val, nval);
+	case sizeof(u16):
+		return pset_prop_read_u16_array(node, propname, val, nval);
+	case sizeof(u32):
+		return pset_prop_read_u32_array(node, propname, val, nval);
+	case sizeof(u64):
+		return pset_prop_read_u64_array(node, propname, val, nval);
+	}
+
+	return -ENXIO;
+}
+
+static int
+pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+				       const char *propname,
+				       const char **val, size_t nval)
+{
+	return pset_prop_read_string_array(to_pset_node(fwnode), propname,
+					   val, nval);
+}
+
+static const struct fwnode_operations pset_fwnode_ops = {
+	.property_present = pset_fwnode_property_present,
+	.property_read_int_array = pset_fwnode_read_int_array,
+	.property_read_string_array = pset_fwnode_property_read_string_array,
+};
+
+/**
+ * device_property_present - check if a property of a device is present
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Check if property @propname is present in the device firmware description.
+ */
+bool device_property_present(struct device *dev, const char *propname)
+{
+	return fwnode_property_present(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_present);
+
+/**
+ * fwnode_property_present - check if a property of a firmware node is present
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ */
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+			     const char *propname)
+{
+	bool ret;
+
+	ret = fwnode_call_bool_op(fwnode, property_present, propname);
+	if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
+	    !IS_ERR_OR_NULL(fwnode->secondary))
+		ret = fwnode_call_bool_op(fwnode->secondary, property_present,
+					 propname);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_present);
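+
+/*
+ * Illustrative usage sketch: test for a boolean-style property before acting
+ * on it.  "wakeup-source" is just an example property name:
+ *
+ *	if (device_property_present(dev, "wakeup-source"))
+ *		device_init_wakeup(dev, true);
+ */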
+
+/**
+ * device_property_read_u8_array - return a u8 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u8 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u8_array(struct device *dev, const char *propname,
+				  u8 *val, size_t nval)
+{
+	return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u8_array);
+
+/**
+ * device_property_read_u16_array - return a u16 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u16 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u16_array(struct device *dev, const char *propname,
+				   u16 *val, size_t nval)
+{
+	return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u16_array);
+
+/**
+ * device_property_read_u32_array - return a u32 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u32 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u32_array(struct device *dev, const char *propname,
+				   u32 *val, size_t nval)
+{
+	return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u32_array);
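+
+/*
+ * Illustrative usage sketch: the %NULL-@val convention above allows a
+ * two-step read of an array of unknown size.  "pin-map" is an example
+ * property name and use_values() a placeholder consumer:
+ *
+ *	int n = device_property_read_u32_array(dev, "pin-map", NULL, 0);
+ *
+ *	if (n > 0) {
+ *		u32 *buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);
+ *
+ *		if (buf && !device_property_read_u32_array(dev, "pin-map",
+ *							   buf, n))
+ *			use_values(buf, n);
+ *		kfree(buf);
+ *	}
+ */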
+
+/**
+ * device_property_read_u64_array - return a u64 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u64 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u64_array(struct device *dev, const char *propname,
+				   u64 *val, size_t nval)
+{
+	return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u64_array);
+
+/**
+ * device_property_read_string_array - return a string array property of device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of string properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * Return: number of values read on success if @val is non-NULL,
+ *	   number of values available on success if @val is NULL,
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO or %-EILSEQ if the property is not an array of strings,
+ *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_string_array(struct device *dev, const char *propname,
+				      const char **val, size_t nval)
+{
+	return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_string_array);
+
+/**
+ * device_property_read_string - return a string property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The value is stored here
+ *
+ * Function reads property @propname from the device firmware description and
+ * stores the value into @val if found. The value is checked to be a string.
+ *
+ * Return: %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO or %-EILSEQ if the property type is not a string.
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_string(struct device *dev, const char *propname,
+				const char **val)
+{
+	return fwnode_property_read_string(dev_fwnode(dev), propname, val);
+}
+EXPORT_SYMBOL_GPL(device_property_read_string);
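+
+/*
+ * Illustrative usage sketch ("label" is a common firmware property name):
+ *
+ *	const char *label;
+ *
+ *	if (!device_property_read_string(dev, "label", &label))
+ *		dev_info(dev, "label: %s\n", label);
+ */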
+
+/**
+ * device_property_match_string - find a string in an array and return index
+ * @dev: Device to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and, if it is found, return its
+ * index.
+ *
+ * Return: index of the matching string (starting from %0) on success,
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of strings,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_match_string(struct device *dev, const char *propname,
+				 const char *string)
+{
+	return fwnode_property_match_string(dev_fwnode(dev), propname, string);
+}
+EXPORT_SYMBOL_GPL(device_property_match_string);
+
+static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+					  const char *propname,
+					  unsigned int elem_size, void *val,
+					  size_t nval)
+{
+	int ret;
+
+	ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
+				 elem_size, val, nval);
+	if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+	    !IS_ERR_OR_NULL(fwnode->secondary))
+		ret = fwnode_call_int_op(
+			fwnode->secondary, property_read_int_array, propname,
+			elem_size, val, nval);
+
+	return ret;
+}
+
+/**
+ * fwnode_property_read_u8_array - return a u8 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u8 properties with @propname from @fwnode and store them
+ * to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
+				  const char *propname, u8 *val, size_t nval)
+{
+	return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
+					      val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
+
+/**
+ * fwnode_property_read_u16_array - return a u16 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u16 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
+				   const char *propname, u16 *val, size_t nval)
+{
+	return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
+					      val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
+
+/**
+ * fwnode_property_read_u32_array - return a u32 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u32 properties with @propname from @fwnode and store
+ * them to @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
+				   const char *propname, u32 *val, size_t nval)
+{
+	return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
+					      val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
+
+/**
+ * fwnode_property_read_u64_array - return a u64 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u64 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * Return: number of values if @val was %NULL,
+ *         %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of numbers,
+ *	   %-EOVERFLOW if the size of the property is not as expected,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
+				   const char *propname, u64 *val, size_t nval)
+{
+	return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
+					      val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
+
+/**
+ * fwnode_property_read_string_array - return string array property of a node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read a string array property @propname from the given firmware node and
+ * store the values to @val if found.
+ *
+ * Return: number of values read on success if @val is non-NULL,
+ *	   number of values available on success if @val is NULL,
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO or %-EILSEQ if the property is not an array of strings,
+ *	   %-EOVERFLOW if the size of the property is not as expected,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+				      const char *propname, const char **val,
+				      size_t nval)
+{
+	int ret;
+
+	ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
+				 val, nval);
+	if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+	    !IS_ERR_OR_NULL(fwnode->secondary))
+		ret = fwnode_call_int_op(fwnode->secondary,
+					 property_read_string_array, propname,
+					 val, nval);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
+
+/**
+ * fwnode_property_read_string - return a string property of a firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The value is stored here
+ *
+ * Read property @propname from the given firmware node and store the value into
+ * @val if found.  The value is checked to be a string.
+ *
+ * Return: %0 if the property was found (success),
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO or %-EILSEQ if the property is not a string,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
+				const char *propname, const char **val)
+{
+	int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
+
+	return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_string);
+
+/**
+ * fwnode_property_match_string - find a string in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and, if it is found, return its
+ * index.
+ *
+ * Return: index of the matching string (starting from %0) on success,
+ *	   %-EINVAL if given arguments are not valid,
+ *	   %-ENODATA if the property does not have a value,
+ *	   %-EPROTO if the property is not an array of strings,
+ *	   %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
+	const char *propname, const char *string)
+{
+	const char **values;
+	int nval, ret;
+
+	nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+	if (nval < 0)
+		return nval;
+
+	if (nval == 0)
+		return -ENODATA;
+
+	values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
+	ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
+	if (ret < 0)
+		goto out;
+
+	ret = match_string(values, nval, string);
+	if (ret < 0)
+		ret = -ENODATA;
+out:
+	kfree(values);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_string);
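+
+/*
+ * Illustrative usage sketch: map a well-known string to its position in a
+ * string-array property.  "clock-names" and "baud" are example values and
+ * my_get_clock_by_index() is a placeholder:
+ *
+ *	int idx = fwnode_property_match_string(fwnode, "clock-names", "baud");
+ *
+ *	if (idx >= 0)
+ *		clk = my_get_clock_by_index(fwnode, idx);
+ */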
+
+/**
+ * fwnode_property_get_reference_args() - Find a reference with arguments
+ * @fwnode:	Firmware node where to look for the reference
+ * @prop:	The name of the property
+ * @nargs_prop:	The name of the property telling the number of
+ *		arguments in the referred node. NULL if @nargs is known,
+ *		otherwise @nargs is ignored. Only relevant on OF.
+ * @nargs:	Number of arguments. Ignored if @nargs_prop is non-NULL.
+ * @index:	Index of the reference, from zero onwards.
+ * @args:	Result structure with reference and integer arguments.
+ *
+ * Obtain a reference based on a named property in an fwnode, with
+ * integer arguments.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * args->fwnode pointer.
+ *
+ * Returns: %0 on success
+ *	    %-ENOENT when the index is out of bounds, the index has an empty
+ *		     reference or the property was not found
+ *	    %-EINVAL on parse error
+ */
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+				       const char *prop, const char *nargs_prop,
+				       unsigned int nargs, unsigned int index,
+				       struct fwnode_reference_args *args)
+{
+	return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+				  nargs, index, args);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
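+
+/*
+ * Illustrative usage sketch, following the DT "io-channels" convention where
+ * "#io-channel-cells" in the target node gives the argument count.  On
+ * success, args.fwnode and args.args[] describe the reference, and the
+ * reference must be dropped with fwnode_handle_put():
+ *
+ *	struct fwnode_reference_args args;
+ *	int ret;
+ *
+ *	ret = fwnode_property_get_reference_args(fwnode, "io-channels",
+ *						 "#io-channel-cells", 0, 0,
+ *						 &args);
+ *	if (!ret)
+ *		fwnode_handle_put(args.fwnode);
+ */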
+
+static void property_entry_free_data(const struct property_entry *p)
+{
+	const void *pointer = property_get_pointer(p);
+	size_t i, nval;
+
+	if (p->is_array) {
+		if (p->type == DEV_PROP_STRING && p->pointer.str) {
+			nval = p->length / sizeof(const char *);
+			for (i = 0; i < nval; i++)
+				kfree(p->pointer.str[i]);
+		}
+		kfree(pointer);
+	} else if (p->type == DEV_PROP_STRING) {
+		kfree(p->value.str);
+	}
+	kfree(p->name);
+}
+
+static int property_copy_string_array(struct property_entry *dst,
+				      const struct property_entry *src)
+{
+	const char **d;
+	size_t nval = src->length / sizeof(*d);
+	int i;
+
+	d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	for (i = 0; i < nval; i++) {
+		d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
+		if (!d[i] && src->pointer.str[i]) {
+			while (--i >= 0)
+				kfree(d[i]);
+			kfree(d);
+			return -ENOMEM;
+		}
+	}
+
+	dst->pointer.str = d;
+	return 0;
+}
+
+static int property_entry_copy_data(struct property_entry *dst,
+				    const struct property_entry *src)
+{
+	const void *pointer = property_get_pointer(src);
+	const void *new;
+	int error;
+
+	if (src->is_array) {
+		if (!src->length)
+			return -ENODATA;
+
+		if (src->type == DEV_PROP_STRING) {
+			error = property_copy_string_array(dst, src);
+			if (error)
+				return error;
+			new = dst->pointer.str;
+		} else {
+			new = kmemdup(pointer, src->length, GFP_KERNEL);
+			if (!new)
+				return -ENOMEM;
+		}
+	} else if (src->type == DEV_PROP_STRING) {
+		new = kstrdup(src->value.str, GFP_KERNEL);
+		if (!new && src->value.str)
+			return -ENOMEM;
+	} else {
+		new = pointer;
+	}
+
+	dst->length = src->length;
+	dst->is_array = src->is_array;
+	dst->type = src->type;
+
+	property_set_pointer(dst, new);
+
+	dst->name = kstrdup(src->name, GFP_KERNEL);
+	if (!dst->name)
+		goto out_free_data;
+
+	return 0;
+
+out_free_data:
+	property_entry_free_data(dst);
+	return -ENOMEM;
+}
+
+/**
+ * property_entries_dup - duplicate array of properties
+ * @properties: array of properties to copy
+ *
+ * This function creates a deep copy of the given NULL-terminated array
+ * of property entries.
+ */
+struct property_entry *
+property_entries_dup(const struct property_entry *properties)
+{
+	struct property_entry *p;
+	int i, n = 0;
+
+	while (properties[n].name)
+		n++;
+
+	p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < n; i++) {
+		int ret = property_entry_copy_data(&p[i], &properties[i]);
+		if (ret) {
+			while (--i >= 0)
+				property_entry_free_data(&p[i]);
+			kfree(p);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(property_entries_dup);
+
+/**
+ * property_entries_free - free previously allocated array of properties
+ * @properties: array of properties to destroy
+ *
+ * This function frees given NULL-terminated array of property entries,
+ * along with their data.
+ */
+void property_entries_free(const struct property_entry *properties)
+{
+	const struct property_entry *p;
+
+	for (p = properties; p->name; p++)
+		property_entry_free_data(p);
+
+	kfree(properties);
+}
+EXPORT_SYMBOL_GPL(property_entries_free);
+
+/**
+ * pset_free_set - releases memory allocated for copied property set
+ * @pset: Property set to release
+ *
+ * The function takes a previously copied property set and releases all the
+ * memory allocated to it.
+ */
+static void pset_free_set(struct property_set *pset)
+{
+	if (!pset)
+		return;
+
+	property_entries_free(pset->properties);
+	kfree(pset);
+}
+
+/**
+ * pset_copy_set - copies property set
+ * @pset: Property set to copy
+ *
+ * This function takes a deep copy of the given property set and returns a
+ * pointer to the copy. Call pset_free_set() to free the resources allocated
+ * by this function.
+ *
+ * Return: Pointer to the new property set or error pointer.
+ */
+static struct property_set *pset_copy_set(const struct property_set *pset)
+{
+	struct property_entry *properties;
+	struct property_set *p;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	properties = property_entries_dup(pset->properties);
+	if (IS_ERR(properties)) {
+		kfree(p);
+		return ERR_CAST(properties);
+	}
+
+	p->properties = properties;
+	return p;
+}
+
+/**
+ * device_remove_properties - Remove properties from a device object.
+ * @dev: Device whose properties to remove.
+ *
+ * The function removes properties previously associated to the device
+ * secondary firmware node with device_add_properties(). Memory allocated
+ * to the properties will also be released.
+ */
+void device_remove_properties(struct device *dev)
+{
+	struct fwnode_handle *fwnode;
+	struct property_set *pset;
+
+	fwnode = dev_fwnode(dev);
+	if (!fwnode)
+		return;
+	/*
+	 * Pick either the primary or the secondary node, depending on which
+	 * one holds the pset. If there is no real firmware node (ACPI/DT),
+	 * the primary node will hold the pset.
+	 */
+	pset = to_pset_node(fwnode);
+	if (pset) {
+		set_primary_fwnode(dev, NULL);
+	} else {
+		pset = to_pset_node(fwnode->secondary);
+		if (pset && dev == pset->dev)
+			set_secondary_fwnode(dev, NULL);
+	}
+	if (pset && dev == pset->dev)
+		pset_free_set(pset);
+}
+EXPORT_SYMBOL_GPL(device_remove_properties);
+
+/**
+ * device_add_properties - Add a collection of properties to a device object.
+ * @dev: Device to add properties to.
+ * @properties: Collection of properties to add.
+ *
+ * Associate a collection of device properties represented by @properties with
+ * @dev as its secondary firmware node. The function takes a copy of
+ * @properties.
+ */
+int device_add_properties(struct device *dev,
+			  const struct property_entry *properties)
+{
+	struct property_set *p, pset;
+
+	if (!properties)
+		return -EINVAL;
+
+	pset.properties = properties;
+
+	p = pset_copy_set(&pset);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	p->fwnode.ops = &pset_fwnode_ops;
+	set_secondary_fwnode(dev, &p->fwnode);
+	p->dev = dev;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_properties);
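+
+/*
+ * Illustrative usage sketch: build a %NULL-terminated property_entry array
+ * with the PROPERTY_ENTRY_*() helpers from <linux/property.h> and attach it.
+ * The property names and values are examples only:
+ *
+ *	static const struct property_entry my_props[] = {
+ *		PROPERTY_ENTRY_U32("linux,max-speed", 115200),
+ *		PROPERTY_ENTRY_STRING("label", "uart0"),
+ *		{ }
+ *	};
+ *
+ *	device_add_properties(dev, my_props);
+ */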
+
+/**
+ * fwnode_get_next_parent - Iterate to the node's parent
+ * @fwnode: Firmware node whose parent is retrieved
+ *
+ * This is like fwnode_get_parent() except that it drops the refcount
+ * on the passed node, making it suitable for iterating through a
+ * node's parents.
+ *
+ * Returns a node pointer with refcount incremented, use
+ * fwnode_handle_put() on it when done.
+ */
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+
+	fwnode_handle_put(fwnode);
+
+	return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware node whose parent is retrieved
+ *
+ * Return parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+	return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
+ * fwnode_get_next_child_node - Return the next child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ */
+struct fwnode_handle *
+fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+			   struct fwnode_handle *child)
+{
+	return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
+
+/**
+ * fwnode_get_next_available_child_node - Return the next available child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ */
+struct fwnode_handle *
+fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+				     struct fwnode_handle *child)
+{
+	struct fwnode_handle *next_child = child;
+
+	if (!fwnode)
+		return NULL;
+
+	do {
+		next_child = fwnode_get_next_child_node(fwnode, next_child);
+
+		if (!next_child || fwnode_device_is_available(next_child))
+			break;
+	} while (next_child);
+
+	return next_child;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
+
+/**
+ * device_get_next_child_node - Return the next child node handle for a device
+ * @dev: Device to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ */
+struct fwnode_handle *device_get_next_child_node(struct device *dev,
+						 struct fwnode_handle *child)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	struct fwnode_handle *fwnode = NULL;
+
+	if (dev->of_node)
+		fwnode = &dev->of_node->fwnode;
+	else if (adev)
+		fwnode = acpi_fwnode_handle(adev);
+
+	return fwnode_get_next_child_node(fwnode, child);
+}
+EXPORT_SYMBOL_GPL(device_get_next_child_node);
+
+/**
+ * fwnode_get_named_child_node - Return first matching named child node handle
+ * @fwnode: Firmware node to find the named child node for.
+ * @childname: String to match child node name against.
+ */
+struct fwnode_handle *
+fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+			    const char *childname)
+{
+	return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
+
+/**
+ * device_get_named_child_node - Return first matching named child node handle
+ * @dev: Device to find the named child node for.
+ * @childname: String to match child node name against.
+ */
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+						  const char *childname)
+{
+	return fwnode_get_named_child_node(dev_fwnode(dev), childname);
+}
+EXPORT_SYMBOL_GPL(device_get_named_child_node);
+
+/**
+ * fwnode_handle_get - Obtain a reference to a device node
+ * @fwnode: Pointer to the device node to obtain the reference to.
+ *
+ * Returns the fwnode handle.
+ */
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
+{
+	if (!fwnode_has_op(fwnode, get))
+		return fwnode;
+
+	return fwnode_call_ptr_op(fwnode, get);
+}
+EXPORT_SYMBOL_GPL(fwnode_handle_get);
+
+/**
+ * fwnode_handle_put - Drop reference to a device node
+ * @fwnode: Pointer to the device node to drop the reference to.
+ *
+ * This has to be used when terminating device_for_each_child_node() iteration
+ * with break or return to prevent stale device node references from being left
+ * behind.
+ */
+void fwnode_handle_put(struct fwnode_handle *fwnode)
+{
+	fwnode_call_void_op(fwnode, put);
+}
+EXPORT_SYMBOL_GPL(fwnode_handle_put);
+
+/**
+ * fwnode_device_is_available - check if a device is available for use
+ * @fwnode: Pointer to the fwnode of the device.
+ */
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
+{
+	return fwnode_call_bool_op(fwnode, device_is_available);
+}
+EXPORT_SYMBOL_GPL(fwnode_device_is_available);
+
+/**
+ * device_get_child_node_count - return the number of child nodes for device
+ * @dev: Device to count the child nodes for
+ */
+unsigned int device_get_child_node_count(struct device *dev)
+{
+	struct fwnode_handle *child;
+	unsigned int count = 0;
+
+	device_for_each_child_node(dev, child)
+		count++;
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(device_get_child_node_count);
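+
+/*
+ * Illustrative usage sketch: iterate over child nodes with
+ * device_for_each_child_node().  When the loop is left early, the reference
+ * taken by the iterator must be dropped with fwnode_handle_put(), as noted
+ * above.  "some-property" is an example name:
+ *
+ *	struct fwnode_handle *child;
+ *
+ *	device_for_each_child_node(dev, child) {
+ *		if (fwnode_property_present(child, "some-property")) {
+ *			fwnode_handle_put(child);
+ *			break;
+ *		}
+ *	}
+ */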
+
+bool device_dma_supported(struct device *dev)
+{
+	/*
+	 * For DT, this is always supported.  For ACPI, this depends on CCA,
+	 * which is determined by acpi_dma_supported().
+	 */
+	if (IS_ENABLED(CONFIG_OF) && dev->of_node)
+		return true;
+
+	return acpi_dma_supported(ACPI_COMPANION(dev));
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+	enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
+
+	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+		if (of_dma_is_coherent(dev->of_node))
+			attr = DEV_DMA_COHERENT;
+		else
+			attr = DEV_DMA_NON_COHERENT;
+	} else {
+		attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+	}
+
+	return attr;
+}
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
+
+/**
+ * fwnode_get_phy_mode - Get phy mode for given firmware node
+ * @fwnode:	Pointer to the given node
+ *
+ * The function gets the PHY interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
+{
+	const char *pm;
+	int err, i;
+
+	err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
+	if (err < 0)
+		err = fwnode_property_read_string(fwnode,
+						  "phy-connection-type", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+		if (!strcasecmp(pm, phy_modes(i)))
+			return i;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:	Pointer to the given device
+ *
+ * The function gets the PHY interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+	return fwnode_get_phy_mode(dev_fwnode(dev));
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
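+
+/*
+ * Illustrative usage sketch: the returned index can be compared against the
+ * phy_interface_t values from <linux/phy.h>; my_setup_rgmii() is a
+ * placeholder:
+ *
+ *	int mode = device_get_phy_mode(dev);
+ *
+ *	if (mode == PHY_INTERFACE_MODE_RGMII)
+ *		my_setup_rgmii(dev);
+ */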
+
+static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
+				 const char *name, char *addr,
+				 int alen)
+{
+	int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
+
+	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+		return addr;
+	return NULL;
+}
+
+/**
+ * fwnode_get_mac_address - Get the MAC from the firmware node
+ * @fwnode:	Pointer to the firmware node
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
+{
+	char *res;
+
+	res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
+	if (res)
+		return res;
+
+	res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
+	if (res)
+		return res;
+
+	return fwnode_get_mac_addr(fwnode, "address", addr, alen);
+}
+EXPORT_SYMBOL(fwnode_get_mac_address);
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:	Pointer to the device
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+	return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
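+
+/*
+ * Illustrative usage sketch: read the MAC into a local buffer and, if one
+ * was found, copy it into the net_device; "ndev" is a placeholder:
+ *
+ *	char mac[ETH_ALEN];
+ *
+ *	if (device_get_mac_address(dev, mac, ETH_ALEN))
+ *		memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ */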
+
+/**
+ * fwnode_irq_get - Get IRQ directly from a fwnode
+ * @fwnode:	Pointer to the firmware node
+ * @index:	Zero-based index of the IRQ
+ *
+ * Returns a Linux IRQ number on success. Other return values follow those of
+ * the underlying acpi_irq_get() or of_irq_get() operation.
+ */
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+{
+	struct device_node *of_node = to_of_node(fwnode);
+	struct resource res;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_OF) && of_node)
+		return of_irq_get(of_node, index);
+
+	ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
+	if (ret)
+		return ret;
+
+	return res.start;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
+
+/**
+ * fwnode_graph_get_next_endpoint - Get next endpoint firmware node
+ * @fwnode: Pointer to the parent firmware node
+ * @prev: Previous endpoint node or %NULL to get the first
+ *
+ * Returns an endpoint firmware node pointer or %NULL if no more endpoints
+ * are available.
+ */
+struct fwnode_handle *
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+			       struct fwnode_handle *prev)
+{
+	return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
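+
+/*
+ * Illustrative usage sketch: walk all endpoints of a node.  The iterator
+ * handles the reference counting of the endpoint nodes itself:
+ *
+ *	struct fwnode_handle *ep = NULL;
+ *
+ *	while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ *		struct fwnode_endpoint info;
+ *
+ *		if (!fwnode_graph_parse_endpoint(ep, &info))
+ *			pr_info("port %u, endpoint %u\n", info.port, info.id);
+ *	}
+ */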
+
+/**
+ * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
+ * @endpoint: Endpoint firmware node of the port
+ *
+ * Return: the firmware node of the device the @endpoint belongs to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
+{
+	struct fwnode_handle *port, *parent;
+
+	port = fwnode_get_parent(endpoint);
+	parent = fwnode_call_ptr_op(port, graph_get_port_parent);
+
+	fwnode_handle_put(port);
+
+	return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote device the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *endpoint, *parent;
+
+	endpoint = fwnode_graph_get_remote_endpoint(fwnode);
+	parent = fwnode_graph_get_port_parent(endpoint);
+
+	fwnode_handle_put(endpoint);
+
+	return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port - Return fwnode of a remote port
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote port the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
+{
+	return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
+
+/**
+ * fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote endpoint the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+	return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
+
+/**
+ * fwnode_graph_get_remote_node - get remote parent node for given port/endpoint
+ * @fwnode: pointer to parent fwnode_handle containing graph port/endpoint
+ * @port_id: identifier of the parent port node
+ * @endpoint_id: identifier of the endpoint node
+ *
+ * Return: Remote fwnode handle associated with the remote endpoint node linked
+ *	   to @fwnode. Use fwnode_handle_put() on it when done.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
+			     u32 endpoint_id)
+{
+	struct fwnode_handle *endpoint = NULL;
+
+	while ((endpoint = fwnode_graph_get_next_endpoint(fwnode, endpoint))) {
+		struct fwnode_endpoint fwnode_ep;
+		struct fwnode_handle *remote;
+		int ret;
+
+		ret = fwnode_graph_parse_endpoint(endpoint, &fwnode_ep);
+		if (ret < 0)
+			continue;
+
+		if (fwnode_ep.port != port_id || fwnode_ep.id != endpoint_id)
+			continue;
+
+		remote = fwnode_graph_get_remote_port_parent(endpoint);
+		if (!remote)
+			return NULL;
+
+		return fwnode_device_is_available(remote) ? remote : NULL;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
+
+/**
+ * fwnode_graph_parse_endpoint - parse common endpoint node properties
+ * @fwnode: pointer to endpoint fwnode_handle
+ * @endpoint: pointer to the fwnode endpoint data structure
+ *
+ * Parse @fwnode representing a graph endpoint node and store the
+ * information in @endpoint. The caller must hold a reference to
+ * @fwnode.
+ */
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+				struct fwnode_endpoint *endpoint)
+{
+	memset(endpoint, 0, sizeof(*endpoint));
+
+	return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
+}
+EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
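+
+/*
+ * Example (illustrative sketch): a caller that already holds a reference
+ * to an endpoint node could log its location like this.
+ *
+ *	struct fwnode_endpoint ep;
+ *
+ *	if (!fwnode_graph_parse_endpoint(endpoint, &ep))
+ *		dev_dbg(dev, "port %u, endpoint %u\n", ep.port, ep.id);
+ */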
+
+const void *device_get_match_data(struct device *dev)
+{
+	return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
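+
+/*
+ * Example (illustrative sketch, hypothetical types): fetch the
+ * per-compatible driver data attached to the matched firmware node
+ * during probe.
+ *
+ *	const struct foo_chip_info *info;
+ *
+ *	info = device_get_match_data(&pdev->dev);
+ *	if (!info)
+ *		return -ENODEV;
+ */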
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
new file mode 100644
index 0000000..6ad5ef4
--- /dev/null
+++ b/drivers/base/regmap/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic register map support.  There are no user serviceable options here,
+# this is an API intended to be used by other kernel subsystems.  These
+# subsystems should select the appropriate symbols.
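+#
+# Example (illustrative, hypothetical symbol names): a codec driver that
+# talks over I2C would do
+#
+#	config SND_SOC_FOO
+#		tristate "Foo codec"
+#		depends on I2C
+#		select REGMAP_I2C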
+
+config REGMAP
+	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+	select IRQ_DOMAIN if REGMAP_IRQ
+	bool
+
+config REGCACHE_COMPRESSED
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	bool
+
+config REGMAP_AC97
+	tristate
+
+config REGMAP_I2C
+	tristate
+	depends on I2C
+
+config REGMAP_SLIMBUS
+	tristate
+	depends on SLIMBUS
+
+config REGMAP_SPI
+	tristate
+	depends on SPI
+
+config REGMAP_SPMI
+	tristate
+	depends on SPMI
+
+config REGMAP_W1
+	tristate
+	depends on W1
+
+config REGMAP_MMIO
+	tristate
+
+config REGMAP_IRQ
+	bool
+
+config REGMAP_SOUNDWIRE
+	tristate
+	depends on SOUNDWIRE_BUS
+
+config REGMAP_SCCB
+	tristate
+	depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
new file mode 100644
index 0000000..f5b4e88
--- /dev/null
+++ b/drivers/base/regmap/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+# For include/trace/define_trace.h to include trace.h
+CFLAGS_regmap.o := -I$(src)
+
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o
+obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o
+obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
+obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
+obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
+obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
+obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
new file mode 100644
index 0000000..a6bf34d
--- /dev/null
+++ b/drivers/base/regmap/internal.h
@@ -0,0 +1,297 @@
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct regmap;
+struct regcache_ops;
+
+struct regmap_debugfs_off_cache {
+	struct list_head list;
+	off_t min;
+	off_t max;
+	unsigned int base_reg;
+	unsigned int max_reg;
+};
+
+struct regmap_format {
+	size_t buf_size;
+	size_t reg_bytes;
+	size_t pad_bytes;
+	size_t val_bytes;
+	void (*format_write)(struct regmap *map,
+			     unsigned int reg, unsigned int val);
+	void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+	void (*format_val)(void *buf, unsigned int val, unsigned int shift);
+	unsigned int (*parse_val)(const void *buf);
+	void (*parse_inplace)(void *buf);
+};
+
+struct regmap_async {
+	struct list_head list;
+	struct regmap *map;
+	void *work_buf;
+};
+
+struct regmap {
+	union {
+		struct mutex mutex;
+		struct {
+			spinlock_t spinlock;
+			unsigned long spinlock_flags;
+		};
+	};
+	regmap_lock lock;
+	regmap_unlock unlock;
+	void *lock_arg; /* This is passed to lock/unlock functions */
+	gfp_t alloc_flags;
+
+	struct device *dev; /* Device we do I/O on */
+	void *work_buf;     /* Scratch buffer used to format I/O */
+	struct regmap_format format;  /* Buffer format */
+	const struct regmap_bus *bus;
+	void *bus_context;
+	const char *name;
+
+	bool async;
+	spinlock_t async_lock;
+	wait_queue_head_t async_waitq;
+	struct list_head async_list;
+	struct list_head async_free;
+	int async_ret;
+
+#ifdef CONFIG_DEBUG_FS
+	bool debugfs_disable;
+	struct dentry *debugfs;
+	const char *debugfs_name;
+
+	unsigned int debugfs_reg_len;
+	unsigned int debugfs_val_len;
+	unsigned int debugfs_tot_len;
+
+	struct list_head debugfs_off_cache;
+	struct mutex cache_lock;
+#endif
+
+	unsigned int max_register;
+	bool (*writeable_reg)(struct device *dev, unsigned int reg);
+	bool (*readable_reg)(struct device *dev, unsigned int reg);
+	bool (*volatile_reg)(struct device *dev, unsigned int reg);
+	bool (*precious_reg)(struct device *dev, unsigned int reg);
+	bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+	const struct regmap_access_table *wr_table;
+	const struct regmap_access_table *rd_table;
+	const struct regmap_access_table *volatile_table;
+	const struct regmap_access_table *precious_table;
+	const struct regmap_access_table *rd_noinc_table;
+
+	int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+	int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+	int (*reg_update_bits)(void *context, unsigned int reg,
+			       unsigned int mask, unsigned int val);
+
+	bool defer_caching;
+
+	unsigned long read_flag_mask;
+	unsigned long write_flag_mask;
+
+	/* number of bits to (left) shift the reg value when formatting */
+	int reg_shift;
+	int reg_stride;
+	int reg_stride_order;
+
+	/* regcache specific members */
+	const struct regcache_ops *cache_ops;
+	enum regcache_type cache_type;
+
+	/* number of bytes in reg_defaults_raw */
+	unsigned int cache_size_raw;
+	/* number of bytes per word in reg_defaults_raw */
+	unsigned int cache_word_size;
+	/* number of entries in reg_defaults */
+	unsigned int num_reg_defaults;
+	/* number of entries in reg_defaults_raw */
+	unsigned int num_reg_defaults_raw;
+
+	/* if set, only the cache is modified not the HW */
+	bool cache_only;
+	/* if set, only the HW is modified not the cache */
+	bool cache_bypass;
+	/* if set, remember to free reg_defaults_raw */
+	bool cache_free;
+
+	struct reg_default *reg_defaults;
+	const void *reg_defaults_raw;
+	void *cache;
+	/* if set, the cache contains newer data than the HW */
+	bool cache_dirty;
+	/* if set, the HW registers are known to match map->reg_defaults */
+	bool no_sync_defaults;
+
+	struct reg_sequence *patch;
+	int patch_regs;
+
+	/* if set, converts bulk read to single read */
+	bool use_single_read;
+	/* if set, converts bulk write to single write */
+	bool use_single_write;
+	/* if set, the device supports multi write mode */
+	bool can_multi_write;
+
+	/* if set, raw reads/writes are limited to this size */
+	size_t max_raw_read;
+	size_t max_raw_write;
+
+	struct rb_root range_tree;
+	void *selector_work_buf;	/* Scratch buffer used for selector */
+
+	struct hwspinlock *hwlock;
+};
+
+struct regcache_ops {
+	const char *name;
+	enum regcache_type type;
+	int (*init)(struct regmap *map);
+	int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+	void (*debugfs_init)(struct regmap *map);
+#endif
+	int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+	int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+	int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+	int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
+};
+
+bool regmap_cached(struct regmap *map, unsigned int reg);
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+		  unsigned int val);
+
+struct regmap_range_node {
+	struct rb_node node;
+	const char *name;
+	struct regmap *map;
+
+	unsigned int range_min;
+	unsigned int range_max;
+
+	unsigned int selector_reg;
+	unsigned int selector_mask;
+	int selector_shift;
+
+	unsigned int window_start;
+	unsigned int window_len;
+};
+
+struct regmap_field {
+	struct regmap *regmap;
+	unsigned int mask;
+	/* lsb */
+	unsigned int shift;
+	unsigned int reg;
+
+	unsigned int id_size;
+	unsigned int id_offset;
+};
+
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map, const char *name);
+extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+	map->debugfs_disable = true;
+}
+
+#else
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map, const struct regmap_config *config);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+		       unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+			unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned long *cache_present,
+			unsigned int block_base, unsigned int start,
+			unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+						const void *base,
+						unsigned int idx)
+{
+	return base + (map->cache_word_size * idx);
+}
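+
+/*
+ * Example: with cache_word_size == 2 the value for idx 3 lives at
+ * base + 6; regcache_get_val() and regcache_set_val() below use the
+ * same addressing.
+ */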
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len);
+
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+					 const struct regmap_bus *bus,
+					 const struct regmap_config *config);
+
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
+
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+					     unsigned int index)
+{
+	if (map->reg_stride_order >= 0)
+		return index << map->reg_stride_order;
+	else
+		return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+						       unsigned int reg)
+{
+	return reg >> map->reg_stride_order;
+}
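+
+/*
+ * Example: with reg_stride == 4 (reg_stride_order == 2),
+ * regmap_get_offset() maps index 3 to offset 3 << 2 == 12, and
+ * regcache_get_index_by_order() maps register 12 back to index 3.
+ */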
+
+#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 0000000..bc6cd88
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,87 @@
+/*
+ * Register cache access API - flat caching support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static inline unsigned int regcache_flat_get_index(const struct regmap *map,
+						   unsigned int reg)
+{
+	return regcache_get_index_by_order(map, reg);
+}
+
+static int regcache_flat_init(struct regmap *map)
+{
+	int i;
+	unsigned int *cache;
+
+	if (!map || map->reg_stride_order < 0 || !map->max_register)
+		return -EINVAL;
+
+	map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+			     + 1, sizeof(unsigned int), GFP_KERNEL);
+	if (!map->cache)
+		return -ENOMEM;
+
+	cache = map->cache;
+
+	for (i = 0; i < map->num_reg_defaults; i++) {
+		unsigned int reg = map->reg_defaults[i].reg;
+		unsigned int index = regcache_flat_get_index(map, reg);
+
+		cache[index] = map->reg_defaults[i].def;
+	}
+
+	return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+	kfree(map->cache);
+	map->cache = NULL;
+
+	return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+			      unsigned int reg, unsigned int *value)
+{
+	unsigned int *cache = map->cache;
+	unsigned int index = regcache_flat_get_index(map, reg);
+
+	*value = cache[index];
+
+	return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+			       unsigned int value)
+{
+	unsigned int *cache = map->cache;
+	unsigned int index = regcache_flat_get_index(map, reg);
+
+	cache[index] = value;
+
+	return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+	.type = REGCACHE_FLAT,
+	.name = "flat",
+	.init = regcache_flat_init,
+	.exit = regcache_flat_exit,
+	.read = regcache_flat_read,
+	.write = regcache_flat_write,
+};
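+
+/*
+ * Example (illustrative sketch, hypothetical names): the flat cache
+ * suits small, dense register maps; it requires max_register to be set
+ * and a power-of-two reg_stride (see regcache_flat_init() above).
+ *
+ *	static const struct regmap_config foo_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *		.max_register = 0x7f,
+ *		.cache_type = REGCACHE_FLAT,
+ *	};
+ */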
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
new file mode 100644
index 0000000..4ff3113
--- /dev/null
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -0,0 +1,374 @@
+/*
+ * Register cache access API - LZO caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/lzo.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_lzo_exit(struct regmap *map);
+
+struct regcache_lzo_ctx {
+	void *wmem;
+	void *dst;
+	const void *src;
+	size_t src_len;
+	size_t dst_len;
+	size_t decompressed_size;
+	unsigned long *sync_bmp;
+	int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int regcache_lzo_block_count(struct regmap *map)
+{
+	return LZO_BLOCK_NUM;
+}
+
+static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
+{
+	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!lzo_ctx->wmem)
+		return -ENOMEM;
+	return 0;
+}
+
+static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
+{
+	size_t compress_size;
+	int ret;
+
+	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+		return -EINVAL;
+	lzo_ctx->dst_len = compress_size;
+	return 0;
+}
+
+static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
+{
+	size_t dst_len;
+	int ret;
+
+	dst_len = lzo_ctx->dst_len;
+	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+				    lzo_ctx->dst, &dst_len);
+	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+		return -EINVAL;
+	return 0;
+}
+
+static int regcache_lzo_compress_cache_block(struct regmap *map,
+		struct regcache_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = regcache_lzo_compress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int regcache_lzo_decompress_cache_block(struct regmap *map,
+		struct regcache_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = regcache_lzo_decompress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static inline int regcache_lzo_get_blkindex(struct regmap *map,
+					    unsigned int reg)
+{
+	return ((reg / map->reg_stride) * map->cache_word_size) /
+		DIV_ROUND_UP(map->cache_size_raw,
+			     regcache_lzo_block_count(map));
+}
+
+static inline int regcache_lzo_get_blkpos(struct regmap *map,
+					  unsigned int reg)
+{
+	return (reg / map->reg_stride) %
+		    (DIV_ROUND_UP(map->cache_size_raw,
+				  regcache_lzo_block_count(map)) /
+		     map->cache_word_size);
+}
+
+static inline int regcache_lzo_get_blksize(struct regmap *map)
+{
+	return DIV_ROUND_UP(map->cache_size_raw,
+			    regcache_lzo_block_count(map));
+}
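+
+/*
+ * Worked example: with cache_size_raw == 512 bytes, LZO_BLOCK_NUM == 8,
+ * cache_word_size == 2 and reg_stride == 1, each block holds 64 bytes
+ * (32 registers), so register 100 lands in block (100 * 2) / 64 == 3
+ * at position 100 % 32 == 4 within the decompressed block.
+ */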
+
+static int regcache_lzo_init(struct regmap *map)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	size_t bmp_size;
+	int ret, i, blksize, blkcount;
+	const char *p, *end;
+	unsigned long *sync_bmp;
+
+	ret = 0;
+
+	blkcount = regcache_lzo_block_count(map);
+	map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
+			     GFP_KERNEL);
+	if (!map->cache)
+		return -ENOMEM;
+	lzo_blocks = map->cache;
+
+	/*
+	 * allocate a bitmap to be used when syncing the cache with
+	 * the hardware.  Each time a register is modified, the corresponding
+	 * bit is set in the bitmap, so we know that we have to sync
+	 * that register.
+	 */
+	bmp_size = map->num_reg_defaults_raw;
+	sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long),
+				 GFP_KERNEL);
+	if (!sync_bmp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	bitmap_zero(sync_bmp, bmp_size);
+
+	/* allocate the lzo blocks and initialize them */
+	for (i = 0; i < blkcount; i++) {
+		lzo_blocks[i] = kzalloc(sizeof(**lzo_blocks),
+					GFP_KERNEL);
+		if (!lzo_blocks[i]) {
+			kfree(sync_bmp);
+			ret = -ENOMEM;
+			goto err;
+		}
+		lzo_blocks[i]->sync_bmp = sync_bmp;
+		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+		/* alloc the working space for the compressed block */
+		ret = regcache_lzo_prepare(lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	blksize = regcache_lzo_get_blksize(map);
+	p = map->reg_defaults_raw;
+	end = map->reg_defaults_raw + map->cache_size_raw;
+	/* compress the register map and fill the lzo blocks */
+	for (i = 0; i < blkcount; i++, p += blksize) {
+		lzo_blocks[i]->src = p;
+		if (p + blksize > end)
+			lzo_blocks[i]->src_len = end - p;
+		else
+			lzo_blocks[i]->src_len = blksize;
+		ret = regcache_lzo_compress_cache_block(map,
+						       lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+		lzo_blocks[i]->decompressed_size =
+			lzo_blocks[i]->src_len;
+	}
+
+	return 0;
+err:
+	regcache_lzo_exit(map);
+	return ret;
+}
+
+static int regcache_lzo_exit(struct regmap *map)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	int i, blkcount;
+
+	lzo_blocks = map->cache;
+	if (!lzo_blocks)
+		return 0;
+
+	blkcount = regcache_lzo_block_count(map);
+	/*
+	 * the pointer to the bitmap used for syncing the cache
+	 * is shared amongst all lzo_blocks.  Ensure it is freed
+	 * only once.
+	 */
+	if (lzo_blocks[0])
+		kfree(lzo_blocks[0]->sync_bmp);
+	for (i = 0; i < blkcount; i++) {
+		if (lzo_blocks[i]) {
+			kfree(lzo_blocks[i]->wmem);
+			kfree(lzo_blocks[i]->dst);
+		}
+		/* each lzo_block is a pointer returned by kmalloc or NULL */
+		kfree(lzo_blocks[i]);
+	}
+	kfree(lzo_blocks);
+	map->cache = NULL;
+	return 0;
+}
+
+static int regcache_lzo_read(struct regmap *map,
+			     unsigned int reg, unsigned int *value)
+{
+	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = regcache_lzo_get_blkindex(map, reg);
+	/* register index within the decompressed block */
+	blkpos = regcache_lzo_get_blkpos(map, reg);
+	lzo_blocks = map->cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+	if (ret >= 0)
+		/* fetch the value from the cache */
+		*value = regcache_get_val(map, lzo_block->dst, blkpos);
+
+	kfree(lzo_block->dst);
+	/* restore the pointer and length of the compressed block */
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+
+	return ret;
+}
+
+static int regcache_lzo_write(struct regmap *map,
+			      unsigned int reg, unsigned int value)
+{
+	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = regcache_lzo_get_blkindex(map, reg);
+	/* register index within the decompressed block */
+	blkpos = regcache_lzo_get_blkpos(map, reg);
+	lzo_blocks = map->cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* write the new value to the cache */
+	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* prepare the source to be the decompressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* compress the block */
+	ret = regcache_lzo_compress_cache_block(map, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		kfree(lzo_block->src);
+		goto out;
+	}
+
+	/* set the bit so we know we have to sync this register */
+	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
+	kfree(tmp_dst);
+	kfree(lzo_block->src);
+	return 0;
+out:
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return ret;
+}
+
+static int regcache_lzo_sync(struct regmap *map, unsigned int min,
+			     unsigned int max)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	unsigned int val;
+	int i;
+	int ret;
+
+	lzo_blocks = map->cache;
+	i = min;
+	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
+			      lzo_blocks[0]->sync_bmp_nbits) {
+		if (i > max)
+			continue;
+
+		ret = regcache_read(map, i, &val);
+		if (ret)
+			return ret;
+
+		/* Is this the hardware default?  If so skip. */
+		ret = regcache_lookup_reg(map, i);
+		if (ret >= 0 && val == map->reg_defaults[ret].def)
+			continue;
+
+		map->cache_bypass = true;
+		ret = _regmap_write(map, i, val);
+		map->cache_bypass = false;
+		if (ret)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			i, val);
+	}
+
+	return 0;
+}
+
+struct regcache_ops regcache_lzo_ops = {
+	.type = REGCACHE_COMPRESSED,
+	.name = "lzo",
+	.init = regcache_lzo_init,
+	.exit = regcache_lzo_exit,
+	.read = regcache_lzo_read,
+	.write = regcache_lzo_write,
+	.sync = regcache_lzo_sync,
+};
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
new file mode 100644
index 0000000..b1e9aae
--- /dev/null
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -0,0 +1,568 @@
+/*
+ * Register cache access API - rbtree caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+				 unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
+
+struct regcache_rbtree_node {
+	/* block of adjacent registers */
+	void *block;
+	/* Which registers are present */
+	long *cache_present;
+	/* base register handled by this block */
+	unsigned int base_reg;
+	/* number of registers available in the block */
+	unsigned int blklen;
+	/* the actual rbtree node holding this block */
+	struct rb_node node;
+} __attribute__ ((packed));
+
+struct regcache_rbtree_ctx {
+	struct rb_root root;
+	struct regcache_rbtree_node *cached_rbnode;
+};
+
+static inline void regcache_rbtree_get_base_top_reg(
+	struct regmap *map,
+	struct regcache_rbtree_node *rbnode,
+	unsigned int *base, unsigned int *top)
+{
+	*base = rbnode->base_reg;
+	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
+}
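+
+/*
+ * Example: a node with base_reg == 0x10, blklen == 4 and reg_stride == 2
+ * covers registers 0x10, 0x12, 0x14 and 0x16, so *top == 0x16.
+ */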
+
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+	struct regcache_rbtree_node *rbnode, unsigned int idx)
+{
+	return regcache_get_val(map, rbnode->block, idx);
+}
+
+static void regcache_rbtree_set_register(struct regmap *map,
+					 struct regcache_rbtree_node *rbnode,
+					 unsigned int idx, unsigned int val)
+{
+	set_bit(idx, rbnode->cache_present);
+	regcache_set_val(map, rbnode->block, idx, val);
+}
+
+static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
+							   unsigned int reg)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+	struct rb_node *node;
+	struct regcache_rbtree_node *rbnode;
+	unsigned int base_reg, top_reg;
+
+	rbnode = rbtree_ctx->cached_rbnode;
+	if (rbnode) {
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+						 &top_reg);
+		if (reg >= base_reg && reg <= top_reg)
+			return rbnode;
+	}
+
+	node = rbtree_ctx->root.rb_node;
+	while (node) {
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+						 &top_reg);
+		if (reg >= base_reg && reg <= top_reg) {
+			rbtree_ctx->cached_rbnode = rbnode;
+			return rbnode;
+		} else if (reg > top_reg) {
+			node = node->rb_right;
+		} else if (reg < base_reg) {
+			node = node->rb_left;
+		}
+	}
+
+	return NULL;
+}
+
+static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
+				  struct regcache_rbtree_node *rbnode)
+{
+	struct rb_node **new, *parent;
+	struct regcache_rbtree_node *rbnode_tmp;
+	unsigned int base_reg_tmp, top_reg_tmp;
+	unsigned int base_reg;
+
+	parent = NULL;
+	new = &root->rb_node;
+	while (*new) {
+		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
+		/* base and top registers of the current rbnode */
+		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
+						 &top_reg_tmp);
+		/* base register of the rbnode to be added */
+		base_reg = rbnode->base_reg;
+		parent = *new;
+		/* if this register has already been inserted, just return */
+		if (base_reg >= base_reg_tmp &&
+		    base_reg <= top_reg_tmp)
+			return 0;
+		else if (base_reg > top_reg_tmp)
+			new = &((*new)->rb_right);
+		else if (base_reg < base_reg_tmp)
+			new = &((*new)->rb_left);
+	}
+
+	/* insert the node into the rbtree */
+	rb_link_node(&rbnode->node, parent, new);
+	rb_insert_color(&rbnode->node, root);
+
+	return 1;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+	struct regmap *map = s->private;
+	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+	struct regcache_rbtree_node *n;
+	struct rb_node *node;
+	unsigned int base, top;
+	size_t mem_size;
+	int nodes = 0;
+	int registers = 0;
+	int this_registers, average;
+
+	map->lock(map->lock_arg);
+
+	mem_size = sizeof(*rbtree_ctx);
+
+	for (node = rb_first(&rbtree_ctx->root); node != NULL;
+	     node = rb_next(node)) {
+		n = rb_entry(node, struct regcache_rbtree_node, node);
+		mem_size += sizeof(*n);
+		mem_size += (n->blklen * map->cache_word_size);
+		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
+
+		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
+		this_registers = ((top - base) / map->reg_stride) + 1;
+		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
+
+		nodes++;
+		registers += this_registers;
+	}
+
+	if (nodes)
+		average = registers / nodes;
+	else
+		average = 0;
+
+	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+		   nodes, registers, average, mem_size);
+
+	map->unlock(map->lock_arg);
+
+	return 0;
+}
+
+static int rbtree_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rbtree_show, inode->i_private);
+}
+
+static const struct file_operations rbtree_fops = {
+	.open		= rbtree_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#endif
+
+static int regcache_rbtree_init(struct regmap *map)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	int i;
+	int ret;
+
+	map->cache = kmalloc(sizeof(*rbtree_ctx), GFP_KERNEL);
+	if (!map->cache)
+		return -ENOMEM;
+
+	rbtree_ctx = map->cache;
+	rbtree_ctx->root = RB_ROOT;
+	rbtree_ctx->cached_rbnode = NULL;
+
+	for (i = 0; i < map->num_reg_defaults; i++) {
+		ret = regcache_rbtree_write(map,
+					    map->reg_defaults[i].reg,
+					    map->reg_defaults[i].def);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	regcache_rbtree_exit(map);
+	return ret;
+}
+
+static int regcache_rbtree_exit(struct regmap *map)
+{
+	struct rb_node *next;
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbtree_node;
+
+	/* if we've already been called then just return */
+	rbtree_ctx = map->cache;
+	if (!rbtree_ctx)
+		return 0;
+
+	/* free up the rbtree */
+	next = rb_first(&rbtree_ctx->root);
+	while (next) {
+		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
+		next = rb_next(&rbtree_node->node);
+		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node->cache_present);
+		kfree(rbtree_node->block);
+		kfree(rbtree_node);
+	}
+
+	/* release the resources */
+	kfree(map->cache);
+	map->cache = NULL;
+
+	return 0;
+}
+
+static int regcache_rbtree_read(struct regmap *map,
+				unsigned int reg, unsigned int *value)
+{
+	struct regcache_rbtree_node *rbnode;
+	unsigned int reg_tmp;
+
+	rbnode = regcache_rbtree_lookup(map, reg);
+	if (rbnode) {
+		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
+		if (!test_bit(reg_tmp, rbnode->cache_present))
+			return -ENOENT;
+		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
+	} else {
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+					   struct regcache_rbtree_node *rbnode,
+					   unsigned int base_reg,
+					   unsigned int top_reg,
+					   unsigned int reg,
+					   unsigned int value)
+{
+	unsigned int blklen;
+	unsigned int pos, offset;
+	unsigned long *present;
+	u8 *blk;
+
+	blklen = (top_reg - base_reg) / map->reg_stride + 1;
+	pos = (reg - base_reg) / map->reg_stride;
+	offset = (rbnode->base_reg - base_reg) / map->reg_stride;
+
+	blk = krealloc(rbnode->block,
+		       blklen * map->cache_word_size,
+		       GFP_KERNEL);
+	if (!blk)
+		return -ENOMEM;
+
+	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+		present = krealloc(rbnode->cache_present,
+				   BITS_TO_LONGS(blklen) * sizeof(*present),
+				   GFP_KERNEL);
+		if (!present) {
+			kfree(blk);
+			return -ENOMEM;
+		}
+
+		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+		       * sizeof(*present));
+	} else {
+		present = rbnode->cache_present;
+	}
+
+	/* insert the register value in the correct place in the rbnode block */
+	if (pos == 0) {
+		memmove(blk + offset * map->cache_word_size,
+			blk, rbnode->blklen * map->cache_word_size);
+		bitmap_shift_left(present, present, offset, blklen);
+	}
+
+	/* update the rbnode block, its size and the base register */
+	rbnode->block = blk;
+	rbnode->blklen = blklen;
+	rbnode->base_reg = base_reg;
+	rbnode->cache_present = present;
+
+	regcache_rbtree_set_register(map, rbnode, pos, value);
+	return 0;
+}
+
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+	struct regcache_rbtree_node *rbnode;
+	const struct regmap_range *range;
+	int i;
+
+	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+	if (!rbnode)
+		return NULL;
+
+	/* If there is a read table then use it to guess at an allocation */
+	if (map->rd_table) {
+		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+			if (regmap_reg_in_range(reg,
+						&map->rd_table->yes_ranges[i]))
+				break;
+		}
+
+		if (i != map->rd_table->n_yes_ranges) {
+			range = &map->rd_table->yes_ranges[i];
+			rbnode->blklen = (range->range_max - range->range_min) /
+				map->reg_stride	+ 1;
+			rbnode->base_reg = range->range_min;
+		}
+	}
+
+	if (!rbnode->blklen) {
+		rbnode->blklen = 1;
+		rbnode->base_reg = reg;
+	}
+
+	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
+				      GFP_KERNEL);
+	if (!rbnode->block)
+		goto err_free;
+
+	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
+					sizeof(*rbnode->cache_present),
+					GFP_KERNEL);
+	if (!rbnode->cache_present)
+		goto err_free_block;
+
+	return rbnode;
+
+err_free_block:
+	kfree(rbnode->block);
+err_free:
+	kfree(rbnode);
+	return NULL;
+}
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+				 unsigned int value)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
+	struct rb_node *node;
+	unsigned int reg_tmp;
+	int ret;
+
+	rbtree_ctx = map->cache;
+
+	/* if we can't locate it in the cached rbnode we'll have
+	 * to traverse the rbtree looking for it.
+	 */
+	rbnode = regcache_rbtree_lookup(map, reg);
+	if (rbnode) {
+		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
+		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
+	} else {
+		unsigned int base_reg, top_reg;
+		unsigned int new_base_reg, new_top_reg;
+		unsigned int min, max;
+		unsigned int max_dist;
+		unsigned int dist, best_dist = UINT_MAX;
+
+		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+			map->cache_word_size;
+		if (reg < max_dist)
+			min = 0;
+		else
+			min = reg - max_dist;
+		max = reg + max_dist;
+
+		/* look for an adjacent register to the one we are about to add */
+		node = rbtree_ctx->root.rb_node;
+		while (node) {
+			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
+					      node);
+
+			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
+				&base_reg, &top_reg);
+
+			if (base_reg <= max && top_reg >= min) {
+				if (reg < base_reg)
+					dist = base_reg - reg;
+				else if (reg > top_reg)
+					dist = reg - top_reg;
+				else
+					dist = 0;
+				if (dist < best_dist) {
+					rbnode = rbnode_tmp;
+					best_dist = dist;
+					new_base_reg = min(reg, base_reg);
+					new_top_reg = max(reg, top_reg);
+				}
+			}
+
+			/*
+			 * Keep looking, we want to choose the closest block,
+			 * otherwise we might end up creating overlapping
+			 * blocks, which breaks the rbtree.
+			 */
+			if (reg < base_reg)
+				node = node->rb_left;
+			else if (reg > top_reg)
+				node = node->rb_right;
+			else
+				break;
+		}
+
+		if (rbnode) {
+			ret = regcache_rbtree_insert_to_block(map, rbnode,
+							      new_base_reg,
+							      new_top_reg, reg,
+							      value);
+			if (ret)
+				return ret;
+			rbtree_ctx->cached_rbnode = rbnode;
+			return 0;
+		}
+
+		/* We did not manage to find a place to insert it in
+		 * an existing block so create a new rbnode.
+		 */
+		rbnode = regcache_rbtree_node_alloc(map, reg);
+		if (!rbnode)
+			return -ENOMEM;
+		regcache_rbtree_set_register(map, rbnode,
+					     reg - rbnode->base_reg, value);
+		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
+		rbtree_ctx->cached_rbnode = rbnode;
+	}
+
+	return 0;
+}
+
+static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
+				unsigned int max)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct rb_node *node;
+	struct regcache_rbtree_node *rbnode;
+	unsigned int base_reg, top_reg;
+	unsigned int start, end;
+	int ret;
+
+	rbtree_ctx = map->cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+			&top_reg);
+		if (base_reg > max)
+			break;
+		if (top_reg < min)
+			continue;
+
+		if (min > base_reg)
+			start = (min - base_reg) / map->reg_stride;
+		else
+			start = 0;
+
+		if (max < top_reg)
+			end = (max - base_reg) / map->reg_stride + 1;
+		else
+			end = rbnode->blklen;
+
+		ret = regcache_sync_block(map, rbnode->block,
+					  rbnode->cache_present,
+					  rbnode->base_reg, start, end);
+		if (ret != 0)
+			return ret;
+	}
+
+	return regmap_async_complete(map);
+}
+
+static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
+				unsigned int max)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbnode;
+	struct rb_node *node;
+	unsigned int base_reg, top_reg;
+	unsigned int start, end;
+
+	rbtree_ctx = map->cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+			&top_reg);
+		if (base_reg > max)
+			break;
+		if (top_reg < min)
+			continue;
+
+		if (min > base_reg)
+			start = (min - base_reg) / map->reg_stride;
+		else
+			start = 0;
+
+		if (max < top_reg)
+			end = (max - base_reg) / map->reg_stride + 1;
+		else
+			end = rbnode->blklen;
+
+		bitmap_clear(rbnode->cache_present, start, end - start);
+	}
+
+	return 0;
+}
+
+struct regcache_ops regcache_rbtree_ops = {
+	.type = REGCACHE_RBTREE,
+	.name = "rbtree",
+	.init = regcache_rbtree_init,
+	.exit = regcache_rbtree_exit,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init = rbtree_debugfs_init,
+#endif
+	.read = regcache_rbtree_read,
+	.write = regcache_rbtree_write,
+	.sync = regcache_rbtree_sync,
+	.drop = regcache_rbtree_drop,
+};
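+
+/*
+ * Example (illustrative sketch, hypothetical names): a driver with a
+ * large, sparsely used register map opts in via its regmap_config;
+ * rbtree nodes are then allocated around the registers actually
+ * accessed.
+ *
+ *	static const struct regmap_config foo_config = {
+ *		.reg_bits = 16,
+ *		.val_bits = 16,
+ *		.max_register = 0xffff,
+ *		.cache_type = REGCACHE_RBTREE,
+ *	};
+ */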
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
new file mode 100644
index 0000000..7735603
--- /dev/null
+++ b/drivers/base/regmap/regcache.c
@@ -0,0 +1,789 @@
+/*
+ * Register cache access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bsearch.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "trace.h"
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+	&regcache_rbtree_ops,
+#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
+	&regcache_lzo_ops,
+#endif
+	&regcache_flat_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+	int i, j;
+	int ret;
+	int count;
+	unsigned int reg, val;
+	void *tmp_buf;
+
+	if (!map->num_reg_defaults_raw)
+		return -EINVAL;
+
+	/* calculate the size of reg_defaults */
+	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
+		if (regmap_readable(map, i * map->reg_stride) &&
+		    !regmap_volatile(map, i * map->reg_stride))
+			count++;
+
+	/* all registers are unreadable or volatile, so just bypass */
+	if (!count) {
+		map->cache_bypass = true;
+		return 0;
+	}
+
+	map->num_reg_defaults = count;
+	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
+					  GFP_KERNEL);
+	if (!map->reg_defaults)
+		return -ENOMEM;
+
+	if (!map->reg_defaults_raw) {
+		bool cache_bypass = map->cache_bypass;
+		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+
+		/* Bypass the cache access till data read from HW */
+		map->cache_bypass = true;
+		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+		if (!tmp_buf) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+		ret = regmap_raw_read(map, 0, tmp_buf,
+				      map->cache_size_raw);
+		map->cache_bypass = cache_bypass;
+		if (ret == 0) {
+			map->reg_defaults_raw = tmp_buf;
+			map->cache_free = 1;
+		} else {
+			kfree(tmp_buf);
+		}
+	}
+
+	/* fill the reg_defaults */
+	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+		reg = i * map->reg_stride;
+
+		if (!regmap_readable(map, reg))
+			continue;
+
+		if (regmap_volatile(map, reg))
+			continue;
+
+		if (map->reg_defaults_raw) {
+			val = regcache_get_val(map, map->reg_defaults_raw, i);
+		} else {
+			bool cache_bypass = map->cache_bypass;
+
+			map->cache_bypass = true;
+			ret = regmap_read(map, reg, &val);
+			map->cache_bypass = cache_bypass;
+			if (ret != 0) {
+				dev_err(map->dev, "Failed to read %d: %d\n",
+					reg, ret);
+				goto err_free;
+			}
+		}
+
+		map->reg_defaults[j].reg = reg;
+		map->reg_defaults[j].def = val;
+		j++;
+	}
+
+	return 0;
+
+err_free:
+	kfree(map->reg_defaults);
+
+	return ret;
+}
+
+int regcache_init(struct regmap *map, const struct regmap_config *config)
+{
+	int ret;
+	int i;
+	void *tmp_buf;
+
+	if (map->cache_type == REGCACHE_NONE) {
+		if (config->reg_defaults || config->num_reg_defaults_raw)
+			dev_warn(map->dev,
+				 "No cache used with register defaults set!\n");
+
+		map->cache_bypass = true;
+		return 0;
+	}
+
+	if (config->reg_defaults && !config->num_reg_defaults) {
+		dev_err(map->dev,
+			 "Register defaults are set without the number!\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < config->num_reg_defaults; i++)
+		if (config->reg_defaults[i].reg % map->reg_stride)
+			return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+		if (cache_types[i]->type == map->cache_type)
+			break;
+
+	if (i == ARRAY_SIZE(cache_types)) {
+		dev_err(map->dev, "Could not match compress type: %d\n",
+			map->cache_type);
+		return -EINVAL;
+	}
+
+	map->num_reg_defaults = config->num_reg_defaults;
+	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+	map->reg_defaults_raw = config->reg_defaults_raw;
+	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
+	map->cache = NULL;
+	map->cache_ops = cache_types[i];
+
+	if (!map->cache_ops->read ||
+	    !map->cache_ops->write ||
+	    !map->cache_ops->name)
+		return -EINVAL;
+
+	/* We still need to ensure that the reg_defaults
+	 * won't vanish from under us.  We'll need to make
+	 * a copy of it.
+	 */
+	if (config->reg_defaults) {
+		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
+				  sizeof(struct reg_default), GFP_KERNEL);
+		if (!tmp_buf)
+			return -ENOMEM;
+		map->reg_defaults = tmp_buf;
+	} else if (map->num_reg_defaults_raw) {
+		/* Some devices such as PMICs don't have cache defaults;
+		 * we cope with this by reading back the HW registers and
+		 * crafting the cache defaults by hand.
+		 */
+		ret = regcache_hw_init(map);
+		if (ret < 0)
+			return ret;
+		if (map->cache_bypass)
+			return 0;
+	}
+
+	if (!map->max_register)
+		map->max_register = map->num_reg_defaults_raw;
+
+	if (map->cache_ops->init) {
+		dev_dbg(map->dev, "Initializing %s cache\n",
+			map->cache_ops->name);
+		ret = map->cache_ops->init(map);
+		if (ret)
+			goto err_free;
+	}
+	return 0;
+
+err_free:
+	kfree(map->reg_defaults);
+	if (map->cache_free)
+		kfree(map->reg_defaults_raw);
+
+	return ret;
+}
+
+void regcache_exit(struct regmap *map)
+{
+	if (map->cache_type == REGCACHE_NONE)
+		return;
+
+	BUG_ON(!map->cache_ops);
+
+	kfree(map->reg_defaults);
+	if (map->cache_free)
+		kfree(map->reg_defaults_raw);
+
+	if (map->cache_ops->exit) {
+		dev_dbg(map->dev, "Destroying %s cache\n",
+			map->cache_ops->name);
+		map->cache_ops->exit(map);
+	}
+}
+
+/**
+ * regcache_read - Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+		  unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	if (map->cache_type == REGCACHE_NONE)
+		return -ENOSYS;
+
+	BUG_ON(!map->cache_ops);
+
+	if (!regmap_volatile(map, reg)) {
+		ret = map->cache_ops->read(map, reg, value);
+
+		if (ret == 0)
+			trace_regmap_reg_read_cache(map, reg, *value);
+
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * regcache_write - Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+		   unsigned int reg, unsigned int value)
+{
+	if (map->cache_type == REGCACHE_NONE)
+		return 0;
+
+	BUG_ON(!map->cache_ops);
+
+	if (!regmap_volatile(map, reg))
+		return map->cache_ops->write(map, reg, value);
+
+	return 0;
+}
+
+static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+				    unsigned int val)
+{
+	int ret;
+
+	/* If we don't know the chip just got reset, then sync everything. */
+	if (!map->no_sync_defaults)
+		return true;
+
+	/* Is this the hardware default?  If so skip. */
+	ret = regcache_lookup_reg(map, reg);
+	if (ret >= 0 && val == map->reg_defaults[ret].def)
+		return false;
+	return true;
+}
+
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+				 unsigned int max)
+{
+	unsigned int reg;
+
+	for (reg = min; reg <= max; reg += map->reg_stride) {
+		unsigned int val;
+		int ret;
+
+		if (regmap_volatile(map, reg) ||
+		    !regmap_writeable(map, reg))
+			continue;
+
+		ret = regcache_read(map, reg, &val);
+		if (ret)
+			return ret;
+
+		if (!regcache_reg_needs_sync(map, reg, val))
+			continue;
+
+		map->cache_bypass = true;
+		ret = _regmap_write(map, reg, val);
+		map->cache_bypass = false;
+		if (ret) {
+			dev_err(map->dev, "Unable to sync register %#x. %d\n",
+				reg, ret);
+			return ret;
+		}
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+	}
+
+	return 0;
+}
+
+/**
+ * regcache_sync - Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile.  In general, drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+	int ret = 0;
+	unsigned int i;
+	const char *name;
+	bool bypass;
+
+	BUG_ON(!map->cache_ops);
+
+	map->lock(map->lock_arg);
+	/* Remember the initial bypass state */
+	bypass = map->cache_bypass;
+	dev_dbg(map->dev, "Syncing %s cache\n",
+		map->cache_ops->name);
+	name = map->cache_ops->name;
+	trace_regcache_sync(map, name, "start");
+
+	if (!map->cache_dirty)
+		goto out;
+
+	map->async = true;
+
+	/* Apply any patch first */
+	map->cache_bypass = true;
+	for (i = 0; i < map->patch_regs; i++) {
+		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to write %x = %x: %d\n",
+				map->patch[i].reg, map->patch[i].def, ret);
+			goto out;
+		}
+	}
+	map->cache_bypass = false;
+
+	if (map->cache_ops->sync)
+		ret = map->cache_ops->sync(map, 0, map->max_register);
+	else
+		ret = regcache_default_sync(map, 0, map->max_register);
+
+	if (ret == 0)
+		map->cache_dirty = false;
+
+out:
+	/* Restore the bypass state */
+	map->async = false;
+	map->cache_bypass = bypass;
+	map->no_sync_defaults = false;
+	map->unlock(map->lock_arg);
+
+	regmap_async_complete(map);
+
+	trace_regcache_sync(map, name, "stop");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_sync_region - Sync part of the register cache with the hardware.
+ *
+ * @map: map to sync.
+ * @min: first register to sync
+ * @max: last register to sync
+ *
+ * Write all non-default register values in the specified region to
+ * the hardware.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync_region(struct regmap *map, unsigned int min,
+			 unsigned int max)
+{
+	int ret = 0;
+	const char *name;
+	bool bypass;
+
+	BUG_ON(!map->cache_ops);
+
+	map->lock(map->lock_arg);
+
+	/* Remember the initial bypass state */
+	bypass = map->cache_bypass;
+
+	name = map->cache_ops->name;
+	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
+
+	trace_regcache_sync(map, name, "start region");
+
+	if (!map->cache_dirty)
+		goto out;
+
+	map->async = true;
+
+	if (map->cache_ops->sync)
+		ret = map->cache_ops->sync(map, min, max);
+	else
+		ret = regcache_default_sync(map, min, max);
+
+out:
+	/* Restore the bypass state */
+	map->cache_bypass = bypass;
+	map->async = false;
+	map->no_sync_defaults = false;
+	map->unlock(map->lock_arg);
+
+	regmap_async_complete(map);
+
+	trace_regcache_sync(map, name, "stop region");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync_region);
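+
+/*
+ * Example (illustrative sketch, register numbers hypothetical): resync
+ * only a block of coefficient registers after the device lost them.
+ *
+ *	regcache_mark_dirty(map);
+ *	ret = regcache_sync_region(map, 0x40, 0x4f);
+ */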
+
+/**
+ * regcache_drop_region - Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+			 unsigned int max)
+{
+	int ret = 0;
+
+	if (!map->cache_ops || !map->cache_ops->drop)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	trace_regcache_drop_region(map, min, max);
+
+	ret = map->cache_ops->drop(map, min, max);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
+
+/**
+ * regcache_cache_only - Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @enable: flag if changes should be written to the hardware
+ *
+ * When a register map is marked as cache only writes to the register
+ * map API will only update the register cache, they will not cause
+ * any hardware changes.  This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+	map->lock(map->lock_arg);
+	WARN_ON(map->cache_bypass && enable);
+	map->cache_only = enable;
+	trace_regmap_cache_only(map, enable);
+	map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_mark_dirty - Indicate that HW registers were reset to default values
+ *
+ * @map: map to mark
+ *
+ * Inform regcache that the device has been powered down or reset, so that
+ * on resume, regcache_sync() knows to write out all non-default values
+ * stored in the cache.
+ *
+ * If this function is not called, regcache_sync() will assume that
+ * the hardware state still matches the cache state, modulo any writes that
+ * happened when cache_only was true.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+	map->lock(map->lock_arg);
+	map->cache_dirty = true;
+	map->no_sync_defaults = true;
+	map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
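+
+/*
+ * Example (illustrative sketch): typical power-down/power-up sequence
+ * in a hypothetical driver.
+ *
+ *	regcache_cache_only(map, true);
+ *	... power to the device is cut ...
+ *	regcache_mark_dirty(map);
+ *
+ *	... later, once power is restored ...
+ *	regcache_cache_only(map, false);
+ *	regcache_sync(map);
+ */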
+
+/**
+ * regcache_cache_bypass - Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @enable: flag if changes should not be written to the cache
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not
+ * the cache directly.  This is useful when syncing the cache back to
+ * the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+	map->lock(map->lock_arg);
+	WARN_ON(map->cache_only && enable);
+	map->cache_bypass = enable;
+	trace_regmap_cache_bypass(map, enable);
+	map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val)
+{
+	if (regcache_get_val(map, base, idx) == val)
+		return true;
+
+	/* Use device native format if possible */
+	if (map->format.format_val) {
+		map->format.format_val(base + (map->cache_word_size * idx),
+				       val, 0);
+		return false;
+	}
+
+	switch (map->cache_word_size) {
+	case 1: {
+		u8 *cache = base;
+
+		cache[idx] = val;
+		break;
+	}
+	case 2: {
+		u16 *cache = base;
+
+		cache[idx] = val;
+		break;
+	}
+	case 4: {
+		u32 *cache = base;
+
+		cache[idx] = val;
+		break;
+	}
+#ifdef CONFIG_64BIT
+	case 8: {
+		u64 *cache = base;
+
+		cache[idx] = val;
+		break;
+	}
+#endif
+	default:
+		BUG();
+	}
+	return false;
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx)
+{
+	if (!base)
+		return -EINVAL;
+
+	/* Use device native format if possible */
+	if (map->format.parse_val)
+		return map->format.parse_val(regcache_get_val_addr(map, base,
+								   idx));
+
+	switch (map->cache_word_size) {
+	case 1: {
+		const u8 *cache = base;
+
+		return cache[idx];
+	}
+	case 2: {
+		const u16 *cache = base;
+
+		return cache[idx];
+	}
+	case 4: {
+		const u32 *cache = base;
+
+		return cache[idx];
+	}
+#ifdef CONFIG_64BIT
+	case 8: {
+		const u64 *cache = base;
+
+		return cache[idx];
+	}
+#endif
+	default:
+		BUG();
+	}
+	/* unreachable */
+	return -1;
+}
+
+static int regcache_default_cmp(const void *a, const void *b)
+{
+	const struct reg_default *_a = a;
+	const struct reg_default *_b = b;
+
+	return _a->reg - _b->reg;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+	struct reg_default key;
+	struct reg_default *r;
+
+	key.reg = reg;
+	key.def = 0;
+
+	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
+		    sizeof(struct reg_default), regcache_default_cmp);
+
+	if (r)
+		return r - map->reg_defaults;
+	else
+		return -ENOENT;
+}
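+
+/*
+ * Example: with reg_defaults == {{0x00, 0x12}, {0x04, 0x34}, {0x08, 0x56}}
+ * (the table must be sorted by register for the bsearch above),
+ * regcache_lookup_reg(map, 0x04) returns index 1 and an unknown
+ * register yields -ENOENT.
+ */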
+
+static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
+{
+	if (!cache_present)
+		return true;
+
+	return test_bit(idx, cache_present);
+}
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+				      unsigned long *cache_present,
+				      unsigned int block_base,
+				      unsigned int start, unsigned int end)
+{
+	unsigned int i, regtmp, val;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp))
+			continue;
+
+		val = regcache_get_val(map, block, i);
+		if (!regcache_reg_needs_sync(map, regtmp, val))
+			continue;
+
+		map->cache_bypass = true;
+
+		ret = _regmap_write(map, regtmp, val);
+
+		map->cache_bypass = false;
+		if (ret != 0) {
+			dev_err(map->dev, "Unable to sync register %#x. %d\n",
+				regtmp, ret);
+			return ret;
+		}
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			regtmp, val);
+	}
+
+	return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+					 unsigned int base, unsigned int cur)
+{
+	size_t val_bytes = map->format.val_bytes;
+	int ret, count;
+
+	if (*data == NULL)
+		return 0;
+
+	count = (cur - base) / map->reg_stride;
+
+	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+		count * val_bytes, count, base, cur - map->reg_stride);
+
+	map->cache_bypass = true;
+
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+	if (ret)
+		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
+			base, cur - map->reg_stride, ret);
+
+	map->cache_bypass = false;
+
+	*data = NULL;
+
+	return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+			    unsigned long *cache_present,
+			    unsigned int block_base, unsigned int start,
+			    unsigned int end)
+{
+	unsigned int i, val;
+	unsigned int regtmp = 0;
+	unsigned int base = 0;
+	const void *data = NULL;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp)) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		val = regcache_get_val(map, block, i);
+		if (!regcache_reg_needs_sync(map, regtmp, val)) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		if (!data) {
+			data = regcache_get_val_addr(map, block, i);
+			base = regtmp;
+		}
+	}
+
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+			map->reg_stride);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned long *cache_present,
+			unsigned int block_base, unsigned int start,
+			unsigned int end)
+{
+	if (regmap_can_raw_write(map) && !map->use_single_write)
+		return regcache_sync_block_raw(map, block, cache_present,
+					       block_base, start, end);
+	else
+		return regcache_sync_block_single(map, block, cache_present,
+						  block_base, start, end);
+}
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
new file mode 100644
index 0000000..c03ebfd
--- /dev/null
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -0,0 +1,101 @@
+/*
+ * Register map access API - AC'97 support
+ *
+ * Copyright 2013 Linaro Ltd.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/ac97_codec.h>
+
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AC97_RESET:
+	case AC97_POWERDOWN:
+	case AC97_INT_PAGING:
+	case AC97_EXTENDED_ID:
+	case AC97_EXTENDED_STATUS:
+	case AC97_EXTENDED_MID:
+	case AC97_EXTENDED_MSTATUS:
+	case AC97_GPIO_STATUS:
+	case AC97_MISC_AFE:
+	case AC97_VENDOR_ID1:
+	case AC97_VENDOR_ID2:
+	case AC97_CODEC_CLASS_REV:
+	case AC97_PCI_SVID:
+	case AC97_PCI_SID:
+	case AC97_FUNC_SELECT:
+	case AC97_FUNC_INFO:
+	case AC97_SENSE_INFO:
+		return true;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
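+/*
+ * Illustrative sketch (not part of this file): an AC'97 codec driver
+ * would typically wire this helper into its regmap_config.  The config
+ * name below is hypothetical:
+ *
+ *	static const struct regmap_config foo_ac97_regmap = {
+ *		.reg_bits = 16,
+ *		.val_bits = 16,
+ *		.volatile_reg = regmap_ac97_default_volatile,
+ *	};
+ */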
+
+static int regmap_ac97_reg_read(void *context, unsigned int reg,
+	unsigned int *val)
+{
+	struct snd_ac97 *ac97 = context;
+
+	*val = ac97->bus->ops->read(ac97, reg);
+
+	return 0;
+}
+
+static int regmap_ac97_reg_write(void *context, unsigned int reg,
+	unsigned int val)
+{
+	struct snd_ac97 *ac97 = context;
+
+	ac97->bus->ops->write(ac97, reg, val);
+
+	return 0;
+}
+
+static const struct regmap_bus ac97_regmap_bus = {
+	.reg_write = regmap_ac97_reg_write,
+	.reg_read = regmap_ac97_reg_read,
+};
+
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
+{
+	return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
+
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name)
+{
+	return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
new file mode 100644
index 0000000..87b562e
--- /dev/null
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -0,0 +1,681 @@
+/*
+ * Register map access API - debugfs
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/list.h>
+
+#include "internal.h"
+
+struct regmap_debugfs_node {
+	struct regmap *map;
+	const char *name;
+	struct list_head link;
+};
+
+static unsigned int dummy_index;
+static struct dentry *regmap_debugfs_root;
+static LIST_HEAD(regmap_debugfs_early_list);
+static DEFINE_MUTEX(regmap_debugfs_early_lock);
+
+/* Calculate the length of a fixed format register address */
+static size_t regmap_calc_reg_len(int max_val)
+{
+	return snprintf(NULL, 0, "%x", max_val);
+}
+
+static ssize_t regmap_name_read_file(struct file *file,
+				     char __user *user_buf, size_t count,
+				     loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	const char *name = "nodev";
+	int ret;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (map->dev && map->dev->driver)
+		name = map->dev->driver->name;
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+	if (ret < 0) {
+		kfree(buf);
+		return ret;
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_name_fops = {
+	.open = simple_open,
+	.read = regmap_name_read_file,
+	.llseek = default_llseek,
+};
+
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+	struct regmap_debugfs_off_cache *c;
+
+	while (!list_empty(&map->debugfs_off_cache)) {
+		c = list_first_entry(&map->debugfs_off_cache,
+				     struct regmap_debugfs_off_cache,
+				     list);
+		list_del(&c->list);
+		kfree(c);
+	}
+}
+
+static bool regmap_printable(struct regmap *map, unsigned int reg)
+{
+	if (regmap_precious(map, reg))
+		return false;
+
+	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
+		return false;
+
+	return true;
+}
+
+/*
+ * Work out where the start offset maps into register numbers, bearing
+ * in mind that we suppress hidden registers.
+ */
+static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
+						  unsigned int base,
+						  loff_t from,
+						  loff_t *pos)
+{
+	struct regmap_debugfs_off_cache *c = NULL;
+	loff_t p = 0;
+	unsigned int i, ret;
+	unsigned int fpos_offset;
+	unsigned int reg_offset;
+
+	/* Suppress the cache if we're using a subrange */
+	if (base)
+		return base;
+
+	/*
+	 * If we don't have a cache build one so we don't have to do a
+	 * linear scan each time.
+	 */
+	mutex_lock(&map->cache_lock);
+	i = base;
+	if (list_empty(&map->debugfs_off_cache)) {
+		for (; i <= map->max_register; i += map->reg_stride) {
+			/* Skip unprinted registers, closing off cache entry */
+			if (!regmap_printable(map, i)) {
+				if (c) {
+					c->max = p - 1;
+					c->max_reg = i - map->reg_stride;
+					list_add_tail(&c->list,
+						      &map->debugfs_off_cache);
+					c = NULL;
+				}
+
+				continue;
+			}
+
+			/* No cache entry?  Start a new one */
+			if (!c) {
+				c = kzalloc(sizeof(*c), GFP_KERNEL);
+				if (!c) {
+					regmap_debugfs_free_dump_cache(map);
+					mutex_unlock(&map->cache_lock);
+					return base;
+				}
+				c->min = p;
+				c->base_reg = i;
+			}
+
+			p += map->debugfs_tot_len;
+		}
+	}
+
+	/* Close the last entry off if we didn't scan beyond it */
+	if (c) {
+		c->max = p - 1;
+		c->max_reg = i - map->reg_stride;
+		list_add_tail(&c->list,
+			      &map->debugfs_off_cache);
+	}
+
+	/*
+	 * This should never happen; we return above if we fail to
+	 * allocate and we should never be in this code if there are
+	 * no registers at all.
+	 */
+	WARN_ON(list_empty(&map->debugfs_off_cache));
+	ret = base;
+
+	/* Find the relevant block:offset */
+	list_for_each_entry(c, &map->debugfs_off_cache, list) {
+		if (from >= c->min && from <= c->max) {
+			fpos_offset = from - c->min;
+			reg_offset = fpos_offset / map->debugfs_tot_len;
+			*pos = c->min + (reg_offset * map->debugfs_tot_len);
+			mutex_unlock(&map->cache_lock);
+			return c->base_reg + (reg_offset * map->reg_stride);
+		}
+
+		*pos = c->max;
+		ret = c->max_reg;
+	}
+	mutex_unlock(&map->cache_lock);
+
+	return ret;
+}
+
+static inline void regmap_calc_tot_len(struct regmap *map,
+				       void *buf, size_t count)
+{
+	/* Calculate the length of a fixed format dump line */
+	if (!map->debugfs_tot_len) {
+		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
+		map->debugfs_val_len = 2 * map->format.val_bytes;
+		map->debugfs_tot_len = map->debugfs_reg_len +
+			map->debugfs_val_len + 3;      /* : \n */
+	}
+}
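+/*
+ * Worked example: with max_register 0xff the register field is 2 hex
+ * digits, so for 16 bit values debugfs_tot_len = 2 + 4 + 3 = 9 bytes
+ * per dumped line, e.g. "3f: beef\n".
+ */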
+
+static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+				   unsigned int to, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	size_t buf_pos = 0;
+	loff_t p = *ppos;
+	ssize_t ret;
+	int i;
+	char *buf;
+	unsigned int val, start_reg;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	regmap_calc_tot_len(map, buf, count);
+
+	/* Work out which register we're starting at */
+	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
+
+	for (i = start_reg; i <= to; i += map->reg_stride) {
+		if (!regmap_readable(map, i) && !regmap_cached(map, i))
+			continue;
+
+		if (regmap_precious(map, i))
+			continue;
+
+		/* If we're in the region the user is trying to read */
+		if (p >= *ppos) {
+			/* ...but not beyond it */
+			if (buf_pos + map->debugfs_tot_len > count)
+				break;
+
+			/* Format the register */
+			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
+				 map->debugfs_reg_len, i - from);
+			buf_pos += map->debugfs_reg_len + 2;
+
+			/* Format the value, write all X if we can't read */
+			ret = regmap_read(map, i, &val);
+			if (ret == 0)
+				snprintf(buf + buf_pos, count - buf_pos,
+					 "%.*x", map->debugfs_val_len, val);
+			else
+				memset(buf + buf_pos, 'X',
+				       map->debugfs_val_len);
+			buf_pos += 2 * map->format.val_bytes;
+
+			buf[buf_pos++] = '\n';
+		}
+		p += map->debugfs_tot_len;
+	}
+
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	*ppos += buf_pos;
+
+out:
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+
+	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+				   count, ppos);
+}
+
+#undef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+/*
+ * Register writes from debugfs can be dangerous, especially with
+ * clients such as PMICs, so no real compile-time configuration option
+ * is provided for this feature; anyone who wants to use it must modify
+ * the source code directly.
+ */
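+/*
+ * Illustrative usage (assuming REGMAP_ALLOW_WRITE_DEBUGFS has been
+ * defined by editing this file, and a map directory named "foo-codec"
+ * exists): writing "30 5" to
+ * /sys/kernel/debug/regmap/foo-codec/registers sets register 0x30 to 0x5.
+ */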
+static ssize_t regmap_map_write_file(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+	char *start = buf;
+	unsigned long reg, value;
+	struct regmap *map = file->private_data;
+	int ret;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	while (*start == ' ')
+		start++;
+	reg = simple_strtoul(start, &start, 16);
+	while (*start == ' ')
+		start++;
+	if (kstrtoul(start, 16, &value))
+		return -EINVAL;
+
+	/* Userspace has been fiddling around behind the kernel's back */
+	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+	ret = regmap_write(map, reg, value);
+	if (ret < 0)
+		return ret;
+	return buf_size;
+}
+#else
+#define regmap_map_write_file NULL
+#endif
+
+static const struct file_operations regmap_map_fops = {
+	.open = simple_open,
+	.read = regmap_map_read_file,
+	.write = regmap_map_write_file,
+	.llseek = default_llseek,
+};
+
+static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct regmap_range_node *range = file->private_data;
+	struct regmap *map = range->map;
+
+	return regmap_read_debugfs(map, range->range_min, range->range_max,
+				   user_buf, count, ppos);
+}
+
+static const struct file_operations regmap_range_fops = {
+	.open = simple_open,
+	.read = regmap_range_read_file,
+	.llseek = default_llseek,
+};
+
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+					   char __user *user_buf, size_t count,
+					   loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	struct regmap_debugfs_off_cache *c;
+	loff_t p = 0;
+	size_t buf_pos = 0;
+	char *buf;
+	char *entry;
+	int ret;
+	unsigned int entry_len;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!entry) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/*
+	 * While we are at it, build the register dump cache now so the
+	 * read() operation on the `registers' file can benefit from using
+	 * the cache.  We do not care about the file position information
+	 * that is contained in the cache, just about the actual register
+	 * blocks.
+	 */
+	regmap_calc_tot_len(map, buf, count);
+	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+	/*
+	 * Reset the file pointer, as the fixed format of the `registers'
+	 * file is not compatible with the `range' file.
+	 */
+	p = 0;
+	mutex_lock(&map->cache_lock);
+	list_for_each_entry(c, &map->debugfs_off_cache, list) {
+		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
+				     c->base_reg, c->max_reg);
+		if (p >= *ppos) {
+			if (buf_pos + entry_len > count)
+				break;
+			memcpy(buf + buf_pos, entry, entry_len);
+			buf_pos += entry_len;
+		}
+		p += entry_len;
+	}
+	mutex_unlock(&map->cache_lock);
+
+	kfree(entry);
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out_buf;
+	}
+
+	*ppos += buf_pos;
+out_buf:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+	.open = simple_open,
+	.read = regmap_reg_ranges_read_file,
+	.llseek = default_llseek,
+};
+
+static int regmap_access_show(struct seq_file *s, void *ignored)
+{
+	struct regmap *map = s->private;
+	int i, reg_len;
+
+	reg_len = regmap_calc_reg_len(map->max_register);
+
+	for (i = 0; i <= map->max_register; i += map->reg_stride) {
+		/* Ignore registers which are neither readable nor writable */
+		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
+			continue;
+
+		/* Format the register */
+		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
+			   regmap_readable(map, i) ? 'y' : 'n',
+			   regmap_writeable(map, i) ? 'y' : 'n',
+			   regmap_volatile(map, i) ? 'y' : 'n',
+			   regmap_precious(map, i) ? 'y' : 'n');
+	}
+
+	return 0;
+}
+
+static int access_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, regmap_access_show, inode->i_private);
+}
+
+static const struct file_operations regmap_access_fops = {
+	.open		= access_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
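+/*
+ * Illustrative "access" file output: one line per register, with
+ * readable/writable/volatile/precious flags in that order, e.g.:
+ *
+ *	00: y y n n
+ *	01: y n y n
+ */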
+
+static ssize_t regmap_cache_only_write_file(struct file *file,
+					    const char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_only);
+	ssize_t result;
+	bool was_enabled, require_sync = false;
+	int err;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_only;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0) {
+		map->unlock(map->lock_arg);
+		return result;
+	}
+
+	if (map->cache_only && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_only && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+		require_sync = true;
+	}
+
+	map->unlock(map->lock_arg);
+
+	if (require_sync) {
+		err = regcache_sync(map);
+		if (err)
+			dev_err(map->dev, "Failed to sync cache %d\n", err);
+	}
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_only_write_file,
+};
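+/*
+ * Illustrative usage from userspace, assuming a map directory named
+ * "foo-codec" under the debugfs root:
+ *
+ *	echo 1 > /sys/kernel/debug/regmap/foo-codec/cache_only
+ *	(power the device down or poke it externally)
+ *	echo 0 > /sys/kernel/debug/regmap/foo-codec/cache_only
+ *
+ * Clearing the flag triggers the regcache_sync() call seen above.
+ */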
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_bypass);
+	ssize_t result;
+	bool was_enabled;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_bypass;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0)
+		goto out;
+
+	if (map->cache_bypass && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_bypass && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+	}
+
+out:
+	map->unlock(map->lock_arg);
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_bypass_write_file,
+};
+
+void regmap_debugfs_init(struct regmap *map, const char *name)
+{
+	struct rb_node *next;
+	struct regmap_range_node *range_node;
+	const char *devname = "dummy";
+
+	/*
+	 * Userspace can initiate reads from the hardware over debugfs.
+	 * Normally internal regmap structures and buffers are protected with
+	 * a mutex or a spinlock, but if the regmap owner decided to disable
+	 * all locking mechanisms, this is no longer the case. For safety:
+	 * don't create the debugfs entries if locking is disabled.
+	 */
+	if (map->debugfs_disable) {
+		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+		return;
+	}
+
+	/* If we don't have the debugfs root yet, postpone init */
+	if (!regmap_debugfs_root) {
+		struct regmap_debugfs_node *node;
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return;
+		node->map = map;
+		node->name = name;
+		mutex_lock(&regmap_debugfs_early_lock);
+		list_add(&node->link, &regmap_debugfs_early_list);
+		mutex_unlock(&regmap_debugfs_early_lock);
+		return;
+	}
+
+	INIT_LIST_HEAD(&map->debugfs_off_cache);
+	mutex_init(&map->cache_lock);
+
+	if (map->dev)
+		devname = dev_name(map->dev);
+
+	if (name) {
+		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+					      devname, name);
+		name = map->debugfs_name;
+	} else {
+		name = devname;
+	}
+
+	if (!strcmp(name, "dummy")) {
+		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+						dummy_index);
+		name = map->debugfs_name;
+		dummy_index++;
+	}
+
+	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+	if (!map->debugfs) {
+		dev_warn(map->dev,
+			 "Failed to create %s debugfs directory\n", name);
+
+		kfree(map->debugfs_name);
+		map->debugfs_name = NULL;
+		return;
+	}
+
+	debugfs_create_file("name", 0400, map->debugfs,
+			    map, &regmap_name_fops);
+
+	debugfs_create_file("range", 0400, map->debugfs,
+			    map, &regmap_reg_ranges_fops);
+
+	if (map->max_register || regmap_readable(map, 0)) {
+		umode_t registers_mode;
+
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+		registers_mode = 0600;
+#else
+		registers_mode = 0400;
+#endif
+
+		debugfs_create_file("registers", registers_mode, map->debugfs,
+				    map, &regmap_map_fops);
+		debugfs_create_file("access", 0400, map->debugfs,
+				    map, &regmap_access_fops);
+	}
+
+	if (map->cache_type) {
+		debugfs_create_file("cache_only", 0600, map->debugfs,
+				    &map->cache_only, &regmap_cache_only_fops);
+		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
+				    &map->cache_dirty);
+		debugfs_create_file("cache_bypass", 0600, map->debugfs,
+				    &map->cache_bypass,
+				    &regmap_cache_bypass_fops);
+	}
+
+	next = rb_first(&map->range_tree);
+	while (next) {
+		range_node = rb_entry(next, struct regmap_range_node, node);
+
+		if (range_node->name)
+			debugfs_create_file(range_node->name, 0400,
+					    map->debugfs, range_node,
+					    &regmap_range_fops);
+
+		next = rb_next(&range_node->node);
+	}
+
+	if (map->cache_ops && map->cache_ops->debugfs_init)
+		map->cache_ops->debugfs_init(map);
+}
+
+void regmap_debugfs_exit(struct regmap *map)
+{
+	if (map->debugfs) {
+		debugfs_remove_recursive(map->debugfs);
+		mutex_lock(&map->cache_lock);
+		regmap_debugfs_free_dump_cache(map);
+		mutex_unlock(&map->cache_lock);
+		kfree(map->debugfs_name);
+	} else {
+		struct regmap_debugfs_node *node, *tmp;
+
+		mutex_lock(&regmap_debugfs_early_lock);
+		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
+					 link) {
+			if (node->map == map) {
+				list_del(&node->link);
+				kfree(node);
+			}
+		}
+		mutex_unlock(&regmap_debugfs_early_lock);
+	}
+}
+
+void regmap_debugfs_initcall(void)
+{
+	struct regmap_debugfs_node *node, *tmp;
+
+	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
+	if (!regmap_debugfs_root) {
+		pr_warn("regmap: Failed to create debugfs root\n");
+		return;
+	}
+
+	mutex_lock(&regmap_debugfs_early_lock);
+	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
+		regmap_debugfs_init(node->map, node->name);
+		list_del(&node->link);
+		kfree(node);
+	}
+	mutex_unlock(&regmap_debugfs_early_lock);
+}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
new file mode 100644
index 0000000..056acde
--- /dev/null
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -0,0 +1,311 @@
+/*
+ * Register map access API - I2C support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
+				      unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_byte_data(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
+				       unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_byte = {
+	.reg_write = regmap_smbus_byte_reg_write,
+	.reg_read = regmap_smbus_byte_reg_read,
+};
+
+static int regmap_smbus_word_reg_read(void *context, unsigned int reg,
+				      unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_word_data(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
+				       unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xffff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word = {
+	.reg_write = regmap_smbus_word_reg_write,
+	.reg_read = regmap_smbus_word_reg_read,
+};
+
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+					  unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_word_swapped(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+					   unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xffff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word_swapped = {
+	.reg_write = regmap_smbus_word_write_swapped,
+	.reg_read = regmap_smbus_word_read_swapped,
+};
+
+static int regmap_i2c_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	ret = i2c_master_send(i2c, data, count);
+	if (ret == count)
+		return 0;
+	else if (ret < 0)
+		return ret;
+	else
+		return -EIO;
+}
+
+static int regmap_i2c_gather_write(void *context,
+				   const void *reg, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	struct i2c_msg xfer[2];
+	int ret;
+
+	/*
+	 * If the I2C controller can't do a gather, tell the core; it
+	 * will substitute in a linear write for us.
+	 */
+	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
+		return -ENOTSUPP;
+
+	xfer[0].addr = i2c->addr;
+	xfer[0].flags = 0;
+	xfer[0].len = reg_size;
+	xfer[0].buf = (void *)reg;
+
+	xfer[1].addr = i2c->addr;
+	xfer[1].flags = I2C_M_NOSTART;
+	xfer[1].len = val_size;
+	xfer[1].buf = (void *)val;
+
+	ret = i2c_transfer(i2c->adapter, xfer, 2);
+	if (ret == 2)
+		return 0;
+	else if (ret < 0)
+		return ret;
+	else
+		return -EIO;
+}
+
+static int regmap_i2c_read(void *context,
+			   const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	struct i2c_msg xfer[2];
+	int ret;
+
+	xfer[0].addr = i2c->addr;
+	xfer[0].flags = 0;
+	xfer[0].len = reg_size;
+	xfer[0].buf = (void *)reg;
+
+	xfer[1].addr = i2c->addr;
+	xfer[1].flags = I2C_M_RD;
+	xfer[1].len = val_size;
+	xfer[1].buf = val;
+
+	ret = i2c_transfer(i2c->adapter, xfer, 2);
+	if (ret == 2)
+		return 0;
+	else if (ret < 0)
+		return ret;
+	else
+		return -EIO;
+}
+
+static struct regmap_bus regmap_i2c = {
+	.write = regmap_i2c_write,
+	.gather_write = regmap_i2c_gather_write,
+	.read = regmap_i2c_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
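+/*
+ * Worked example of the raw format used with this bus: for a map with
+ * reg_bits = 8 and val_bits = 16 (both big-endian per the defaults
+ * above), writing 0xbeef to register 0x2a puts the bytes
+ * { 0x2a, 0xbe, 0xef } on the wire via regmap_i2c_write().
+ */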
+
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+				      size_t count)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (count < 1)
+		return -EINVAL;
+
+	--count;
+	return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+					      ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+				     size_t reg_size, void *val,
+				     size_t val_size)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg_size != 1 || val_size < 1)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+	if (ret == val_size)
+		return 0;
+	else if (ret < 0)
+		return ret;
+	else
+		return -EIO;
+}
+
+static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+	.write = regmap_i2c_smbus_i2c_write,
+	.read = regmap_i2c_smbus_i2c_read,
+	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
+	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
+static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+					const struct regmap_config *config)
+{
+	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
+		return &regmap_i2c;
+	else if (config->val_bits == 8 && config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_I2C_BLOCK))
+		return &regmap_i2c_smbus_i2c_block;
+	else if (config->val_bits == 16 && config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_WORD_DATA))
+		switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+		case REGMAP_ENDIAN_LITTLE:
+			return &regmap_smbus_word;
+		case REGMAP_ENDIAN_BIG:
+			return &regmap_smbus_word_swapped;
+		default:		/* everything else is not supported */
+			break;
+		}
+	else if (config->val_bits == 8 && config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_BYTE_DATA))
+		return &regmap_smbus_byte;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
+
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
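+/*
+ * Illustrative sketch (not part of this file): client drivers normally
+ * use the devm_regmap_init_i2c() wrapper from <linux/regmap.h>.  The
+ * config below is hypothetical:
+ *
+ *	static const struct regmap_config foo_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 16,
+ *	};
+ *
+ *	map = devm_regmap_init_i2c(client, &foo_config);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */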
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 0000000..429ca8e
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,851 @@
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+	struct mutex lock;
+	struct irq_chip irq_chip;
+
+	struct regmap *map;
+	const struct regmap_irq_chip *chip;
+
+	int irq_base;
+	struct irq_domain *domain;
+
+	int irq;
+	int wake_count;
+
+	void *status_reg_buf;
+	unsigned int *status_buf;
+	unsigned int *mask_buf;
+	unsigned int *mask_buf_def;
+	unsigned int *wake_buf;
+	unsigned int *type_buf;
+	unsigned int *type_buf_def;
+
+	unsigned int irq_reg_stride;
+	unsigned int type_reg_stride;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+				     int irq)
+{
+	return &data->chip->irqs[irq];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&d->lock);
+}
+
+static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
+				  unsigned int reg, unsigned int mask,
+				  unsigned int val)
+{
+	if (d->chip->mask_writeonly)
+		return regmap_write_bits(d->map, reg, mask, val);
+	else
+		return regmap_update_bits(d->map, reg, mask, val);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	int i, ret;
+	u32 reg;
+	u32 unmask_offset;
+
+	if (d->chip->runtime_pm) {
+		ret = pm_runtime_get_sync(map->dev);
+		if (ret < 0)
+			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
+				ret);
+	}
+
+	/*
+	 * If there's been a change in the mask write it back to the
+	 * hardware.  We rely on the use of the regmap core cache to
+	 * suppress pointless writes.
+	 */
+	for (i = 0; i < d->chip->num_regs; i++) {
+		reg = d->chip->mask_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		if (d->chip->mask_invert) {
+			ret = regmap_irq_update_bits(d, reg,
+					 d->mask_buf_def[i], ~d->mask_buf[i]);
+		} else if (d->chip->unmask_base) {
+			/* set mask with mask_base register */
+			ret = regmap_irq_update_bits(d, reg,
+					d->mask_buf_def[i], ~d->mask_buf[i]);
+			if (ret < 0)
+				dev_err(d->map->dev,
+					"Failed to sync unmasks in %x\n",
+					reg);
+			unmask_offset = d->chip->unmask_base -
+							d->chip->mask_base;
+			/* clear mask with unmask_base register */
+			ret = regmap_irq_update_bits(d,
+					reg + unmask_offset,
+					d->mask_buf_def[i],
+					d->mask_buf[i]);
+		} else {
+			ret = regmap_irq_update_bits(d, reg,
+					 d->mask_buf_def[i], d->mask_buf[i]);
+		}
+		if (ret != 0)
+			dev_err(d->map->dev, "Failed to sync masks in %x\n",
+				reg);
+
+		reg = d->chip->wake_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		if (d->wake_buf) {
+			if (d->chip->wake_invert)
+				ret = regmap_irq_update_bits(d, reg,
+							 d->mask_buf_def[i],
+							 ~d->wake_buf[i]);
+			else
+				ret = regmap_irq_update_bits(d, reg,
+							 d->mask_buf_def[i],
+							 d->wake_buf[i]);
+			if (ret != 0)
+				dev_err(d->map->dev,
+					"Failed to sync wakes in %x: %d\n",
+					reg, ret);
+		}
+
+		if (!d->chip->init_ack_masked)
+			continue;
+		/*
+		 * Ack all the masked interrupts unconditionally; otherwise
+		 * a masked interrupt which has not been acked will be
+		 * ignored in the irq handler and may trigger an irq storm.
+		 */
+		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
+			reg = d->chip->ack_base +
+				(i * map->reg_stride * d->irq_reg_stride);
+			/* some chips ack by writing 0 */
+			if (d->chip->ack_invert)
+				ret = regmap_write(map, reg, ~d->mask_buf[i]);
+			else
+				ret = regmap_write(map, reg, d->mask_buf[i]);
+			if (ret != 0)
+				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+					reg, ret);
+		}
+	}
+
+	for (i = 0; i < d->chip->num_type_reg; i++) {
+		if (!d->type_buf_def[i])
+			continue;
+		reg = d->chip->type_base +
+			(i * map->reg_stride * d->type_reg_stride);
+		if (d->chip->type_invert)
+			ret = regmap_irq_update_bits(d, reg,
+				d->type_buf_def[i], ~d->type_buf[i]);
+		else
+			ret = regmap_irq_update_bits(d, reg,
+				d->type_buf_def[i], d->type_buf[i]);
+		if (ret != 0)
+			dev_err(d->map->dev, "Failed to sync type in %x\n",
+				reg);
+	}
+
+	if (d->chip->runtime_pm)
+		pm_runtime_put(map->dev);
+
+	/* If we've changed our wakeup count propagate it to the parent */
+	if (d->wake_count < 0)
+		for (i = d->wake_count; i < 0; i++)
+			irq_set_irq_wake(d->irq, 0);
+	else if (d->wake_count > 0)
+		for (i = 0; i < d->wake_count; i++)
+			irq_set_irq_wake(d->irq, 1);
+
+	d->wake_count = 0;
+
+	mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
+}
+
+static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+	int reg = irq_data->type_reg_offset / map->reg_stride;
+
+	if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
+		return 0;
+
+	d->type_buf[reg] &= ~(irq_data->type_falling_mask |
+					irq_data->type_rising_mask);
+	switch (type) {
+	case IRQ_TYPE_EDGE_FALLING:
+		d->type_buf[reg] |= irq_data->type_falling_mask;
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+		d->type_buf[reg] |= irq_data->type_rising_mask;
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		d->type_buf[reg] |= (irq_data->type_falling_mask |
+					irq_data->type_rising_mask);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+	if (on) {
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				&= ~irq_data->mask;
+		d->wake_count++;
+	} else {
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				|= irq_data->mask;
+		d->wake_count--;
+	}
+
+	return 0;
+}
+
+static const struct irq_chip regmap_irq_chip = {
+	.irq_bus_lock		= regmap_irq_lock,
+	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
+	.irq_disable		= regmap_irq_disable,
+	.irq_enable		= regmap_irq_enable,
+	.irq_set_type		= regmap_irq_set_type,
+	.irq_set_wake		= regmap_irq_set_wake,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+	struct regmap_irq_chip_data *data = d;
+	const struct regmap_irq_chip *chip = data->chip;
+	struct regmap *map = data->map;
+	int ret, i;
+	bool handled = false;
+	u32 reg;
+
+	if (chip->handle_pre_irq)
+		chip->handle_pre_irq(chip->irq_drv_data);
+
+	if (chip->runtime_pm) {
+		ret = pm_runtime_get_sync(map->dev);
+		if (ret < 0) {
+			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
+				ret);
+			pm_runtime_put(map->dev);
+			goto exit;
+		}
+	}
+
+	/*
+	 * Read in the statuses, using a single bulk read if possible
+	 * in order to reduce the I/O overheads.
+	 */
+	if (!map->use_single_read && map->reg_stride == 1 &&
+	    data->irq_reg_stride == 1) {
+		u8 *buf8 = data->status_reg_buf;
+		u16 *buf16 = data->status_reg_buf;
+		u32 *buf32 = data->status_reg_buf;
+
+		BUG_ON(!data->status_reg_buf);
+
+		ret = regmap_bulk_read(map, chip->status_base,
+				       data->status_reg_buf,
+				       chip->num_regs);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read IRQ status: %d\n",
+				ret);
+			goto exit;
+		}
+
+		for (i = 0; i < data->chip->num_regs; i++) {
+			switch (map->format.val_bytes) {
+			case 1:
+				data->status_buf[i] = buf8[i];
+				break;
+			case 2:
+				data->status_buf[i] = buf16[i];
+				break;
+			case 4:
+				data->status_buf[i] = buf32[i];
+				break;
+			default:
+				BUG();
+				goto exit;
+			}
+		}
+
+	} else {
+		for (i = 0; i < data->chip->num_regs; i++) {
+			ret = regmap_read(map, chip->status_base +
+					  (i * map->reg_stride
+					   * data->irq_reg_stride),
+					  &data->status_buf[i]);
+
+			if (ret != 0) {
+				dev_err(map->dev,
+					"Failed to read IRQ status: %d\n",
+					ret);
+				if (chip->runtime_pm)
+					pm_runtime_put(map->dev);
+				goto exit;
+			}
+		}
+	}
+
+	/*
+	 * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+	 * interrupt.  We assume that typically few of the interrupts
+	 * will fire simultaneously so don't worry about overhead from
+	 * doing a write per register.
+	 */
+	for (i = 0; i < data->chip->num_regs; i++) {
+		data->status_buf[i] &= ~data->mask_buf[i];
+
+		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
+			reg = chip->ack_base +
+				(i * map->reg_stride * data->irq_reg_stride);
+			ret = regmap_write(map, reg, data->status_buf[i]);
+			if (ret != 0)
+				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+					reg, ret);
+		}
+	}
+
+	for (i = 0; i < chip->num_irqs; i++) {
+		if (data->status_buf[chip->irqs[i].reg_offset /
+				     map->reg_stride] & chip->irqs[i].mask) {
+			handle_nested_irq(irq_find_mapping(data->domain, i));
+			handled = true;
+		}
+	}
+
+	if (chip->runtime_pm)
+		pm_runtime_put(map->dev);
+
+exit:
+	if (chip->handle_post_irq)
+		chip->handle_post_irq(chip->irq_drv_data);
+
+	if (handled)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	struct regmap_irq_chip_data *data = h->host_data;
+
+	irq_set_chip_data(virq, data);
+	irq_set_chip(virq, &data->irq_chip);
+	irq_set_nested_thread(virq, 1);
+	irq_set_parent(virq, data->irq);
+	irq_set_noprobe(virq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops regmap_domain_ops = {
+	.map	= regmap_irq_map,
+	.xlate	= irq_domain_xlate_onetwocell,
+};
+
+/**
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache.  The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, const struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data *d;
+	int i;
+	int ret = -ENOMEM;
+	u32 reg;
+	u32 unmask_offset;
+
+	if (chip->num_regs <= 0)
+		return -EINVAL;
+
+	for (i = 0; i < chip->num_irqs; i++) {
+		if (chip->irqs[i].reg_offset % map->reg_stride)
+			return -EINVAL;
+		if (chip->irqs[i].reg_offset / map->reg_stride >=
+		    chip->num_regs)
+			return -EINVAL;
+	}
+
+	if (irq_base) {
+		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+		if (irq_base < 0) {
+			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+				 irq_base);
+			return irq_base;
+		}
+	}
+
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+				GFP_KERNEL);
+	if (!d->status_buf)
+		goto err_alloc;
+
+	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+			      GFP_KERNEL);
+	if (!d->mask_buf)
+		goto err_alloc;
+
+	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
+				  GFP_KERNEL);
+	if (!d->mask_buf_def)
+		goto err_alloc;
+
+	if (chip->wake_base) {
+		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
+				      GFP_KERNEL);
+		if (!d->wake_buf)
+			goto err_alloc;
+	}
+
+	if (chip->num_type_reg) {
+		d->type_buf_def = kcalloc(chip->num_type_reg,
+					sizeof(unsigned int), GFP_KERNEL);
+		if (!d->type_buf_def)
+			goto err_alloc;
+
+		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
+				      GFP_KERNEL);
+		if (!d->type_buf)
+			goto err_alloc;
+	}
+
+	d->irq_chip = regmap_irq_chip;
+	d->irq_chip.name = chip->name;
+	d->irq = irq;
+	d->map = map;
+	d->chip = chip;
+	d->irq_base = irq_base;
+
+	if (chip->irq_reg_stride)
+		d->irq_reg_stride = chip->irq_reg_stride;
+	else
+		d->irq_reg_stride = 1;
+
+	if (chip->type_reg_stride)
+		d->type_reg_stride = chip->type_reg_stride;
+	else
+		d->type_reg_stride = 1;
+
+	if (!map->use_single_read && map->reg_stride == 1 &&
+	    d->irq_reg_stride == 1) {
+		d->status_reg_buf = kmalloc_array(chip->num_regs,
+						  map->format.val_bytes,
+						  GFP_KERNEL);
+		if (!d->status_reg_buf)
+			goto err_alloc;
+	}
+
+	mutex_init(&d->lock);
+
+	for (i = 0; i < chip->num_irqs; i++)
+		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
+			|= chip->irqs[i].mask;
+
+	/* Mask all the interrupts by default */
+	for (i = 0; i < chip->num_regs; i++) {
+		d->mask_buf[i] = d->mask_buf_def[i];
+		reg = chip->mask_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		if (chip->mask_invert)
+			ret = regmap_irq_update_bits(d, reg,
+					 d->mask_buf[i], ~d->mask_buf[i]);
+		else if (d->chip->unmask_base) {
+			unmask_offset = d->chip->unmask_base -
+					d->chip->mask_base;
+			ret = regmap_irq_update_bits(d,
+					reg + unmask_offset,
+					d->mask_buf[i],
+					d->mask_buf[i]);
+		} else
+			ret = regmap_irq_update_bits(d, reg,
+					 d->mask_buf[i], d->mask_buf[i]);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+				reg, ret);
+			goto err_alloc;
+		}
+
+		if (!chip->init_ack_masked)
+			continue;
+
+		/* Ack masked but set interrupts */
+		reg = chip->status_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		ret = regmap_read(map, reg, &d->status_buf[i]);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read IRQ status: %d\n",
+				ret);
+			goto err_alloc;
+		}
+
+		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
+			reg = chip->ack_base +
+				(i * map->reg_stride * d->irq_reg_stride);
+			if (chip->ack_invert)
+				ret = regmap_write(map, reg,
+					~(d->status_buf[i] & d->mask_buf[i]));
+			else
+				ret = regmap_write(map, reg,
+					d->status_buf[i] & d->mask_buf[i]);
+			if (ret != 0) {
+				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+					reg, ret);
+				goto err_alloc;
+			}
+		}
+	}
+
+	/* Wake is disabled by default */
+	if (d->wake_buf) {
+		for (i = 0; i < chip->num_regs; i++) {
+			d->wake_buf[i] = d->mask_buf_def[i];
+			reg = chip->wake_base +
+				(i * map->reg_stride * d->irq_reg_stride);
+
+			if (chip->wake_invert)
+				ret = regmap_irq_update_bits(d, reg,
+							 d->mask_buf_def[i],
+							 0);
+			else
+				ret = regmap_irq_update_bits(d, reg,
+							 d->mask_buf_def[i],
+							 d->wake_buf[i]);
+			if (ret != 0) {
+				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+					reg, ret);
+				goto err_alloc;
+			}
+		}
+	}
+
+	if (chip->num_type_reg) {
+		for (i = 0; i < chip->num_irqs; i++) {
+			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
+			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
+					chip->irqs[i].type_falling_mask;
+		}
+		for (i = 0; i < chip->num_type_reg; ++i) {
+			if (!d->type_buf_def[i])
+				continue;
+
+			reg = chip->type_base +
+				(i * map->reg_stride * d->type_reg_stride);
+			if (chip->type_invert)
+				ret = regmap_irq_update_bits(d, reg,
+					d->type_buf_def[i], 0xFF);
+			else
+				ret = regmap_irq_update_bits(d, reg,
+					d->type_buf_def[i], 0x0);
+			if (ret != 0) {
+				dev_err(map->dev,
+					"Failed to set type in 0x%x: %x\n",
+					reg, ret);
+				goto err_alloc;
+			}
+		}
+	}
+
+	if (irq_base)
+		d->domain = irq_domain_add_legacy(map->dev->of_node,
+						  chip->num_irqs, irq_base, 0,
+						  &regmap_domain_ops, d);
+	else
+		d->domain = irq_domain_add_linear(map->dev->of_node,
+						  chip->num_irqs,
+						  &regmap_domain_ops, d);
+	if (!d->domain) {
+		dev_err(map->dev, "Failed to create IRQ domain\n");
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+				   irq_flags | IRQF_ONESHOT,
+				   chip->name, d);
+	if (ret != 0) {
+		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+			irq, chip->name, ret);
+		goto err_domain;
+	}
+
+	*data = d;
+
+	return 0;
+
+err_domain:
+	/* Should really dispose of the domain but... */
+err_alloc:
+	kfree(d->type_buf);
+	kfree(d->type_buf_def);
+	kfree(d->wake_buf);
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_buf);
+	kfree(d->status_reg_buf);
+	kfree(d);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
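+/*
+ * Illustrative sketch (not part of this file): a PMIC driver with two
+ * interrupt sources in one status/mask register pair might describe its
+ * chip as below.  All FOO_* and foo_* names are hypothetical.
+ *
+ *	static const struct regmap_irq foo_irqs[] = {
+ *		[FOO_IRQ_GPIO]     = { .reg_offset = 0, .mask = BIT(0) },
+ *		[FOO_IRQ_OVERTEMP] = { .reg_offset = 0, .mask = BIT(1) },
+ *	};
+ *
+ *	static const struct regmap_irq_chip foo_irq_chip = {
+ *		.name = "foo",
+ *		.status_base = FOO_REG_IRQ_STATUS,
+ *		.mask_base = FOO_REG_IRQ_MASK,
+ *		.ack_base = FOO_REG_IRQ_ACK,
+ *		.num_regs = 1,
+ *		.irqs = foo_irqs,
+ *		.num_irqs = ARRAY_SIZE(foo_irqs),
+ *	};
+ *
+ *	ret = regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW, 0,
+ *				  &foo_irq_chip, &foo_irq_data);
+ */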
+
+/**
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+	unsigned int virq;
+	int hwirq;
+
+	if (!d)
+		return;
+
+	free_irq(irq, d);
+
+	/* Dispose of all virtual irqs from the irq domain before removing it */
+	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+		/* Ignore the hwirq if it is a hole in the IRQ list */
+		if (!d->chip->irqs[hwirq].mask)
+			continue;
+
+		/*
+		 * Find the virtual irq for this hwirq on the chip and,
+		 * if it has been mapped, dispose of it.
+		 */
+		virq = irq_find_mapping(d->domain, hwirq);
+		if (virq)
+			irq_dispose_mapping(virq);
+	}
+
+	irq_domain_remove(d->domain);
+	kfree(d->type_buf);
+	kfree(d->type_buf_def);
+	kfree(d->wake_buf);
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+	regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+{
+	struct regmap_irq_chip_data **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+	return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
+ *
+ * @dev: The device to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+			     int irq_flags, int irq_base,
+			     const struct regmap_irq_chip *chip,
+			     struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data **ptr, *d;
+	int ret;
+
+	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
+				  chip, &d);
+	if (ret < 0) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	*ptr = d;
+	devres_add(dev, ptr);
+	*data = d;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
+ *
+ * @dev: Device for which the resource was allocated.
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+			      struct regmap_irq_chip_data *data)
+{
+	int rc;
+
+	WARN_ON(irq != data->irq);
+	rc = devres_release(dev, devm_regmap_irq_chip_release,
+			    devm_regmap_irq_chip_match, data);
+
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
+ *
+ * @data: regmap irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+	WARN_ON(!data->irq_base);
+	return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
+
+/**
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
+ *
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
+{
+	/* Handle holes in the IRQ list */
+	if (!data->chip->irqs[irq].mask)
+		return -EINVAL;
+
+	return irq_create_mapping(data->domain, irq);
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
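+/*
+ * Illustrative usage (FOO_IRQ_OVERTEMP and foo_overtemp_handler are
+ * hypothetical): a child driver can map one of the chip's interrupts
+ * and request it in the usual way:
+ *
+ *	virq = regmap_irq_get_virq(data, FOO_IRQ_OVERTEMP);
+ *	if (virq < 0)
+ *		return virq;
+ *	ret = request_threaded_irq(virq, NULL, foo_overtemp_handler,
+ *				   IRQF_ONESHOT, "foo-overtemp", foo);
+ */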
+
+/**
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs and for integration
+ * with subsystems.  For ease of integration NULL is accepted as a
+ * domain, allowing devices to just call this even if no domain is
+ * allocated.
+ */
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
+{
+	if (data)
+		return data->domain;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
new file mode 100644
index 0000000..8741fb5
--- /dev/null
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -0,0 +1,391 @@
+/*
+ * Register map access API - MMIO support
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_mmio_context {
+	void __iomem *regs;
+	unsigned val_bytes;
+
+	bool attached_clk;
+	struct clk *clk;
+
+	void (*reg_write)(struct regmap_mmio_context *ctx,
+			  unsigned int reg, unsigned int val);
+	unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+			         unsigned int reg);
+};
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+	switch (reg_bits) {
+	case 8:
+	case 16:
+	case 32:
+#ifdef CONFIG_64BIT
+	case 64:
+#endif
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+	int min_stride;
+
+	switch (val_bits) {
+	case 8:
+		/* The core treats 0 as 1 */
+		min_stride = 0;
+		break;
+	case 16:
+		min_stride = 2;
+		break;
+	case 32:
+		min_stride = 4;
+		break;
+#ifdef CONFIG_64BIT
+	case 64:
+		min_stride = 8;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	return min_stride;
+}
+
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+				unsigned int reg,
+				unsigned int val)
+{
+	writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	iowrite32be(val, ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+				  unsigned int reg,
+				  unsigned int val)
+{
+	writeq(val, ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct regmap_mmio_context *ctx = context;
+	int ret;
+
+	if (!IS_ERR(ctx->clk)) {
+		ret = clk_enable(ctx->clk);
+		if (ret < 0)
+			return ret;
+	}
+
+	ctx->reg_write(ctx, reg, val);
+
+	if (!IS_ERR(ctx->clk))
+		clk_disable(ctx->clk);
+
+	return 0;
+}
+
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+				      unsigned int reg)
+{
+	return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return ioread32be(ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+				         unsigned int reg)
+{
+	return readq(ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct regmap_mmio_context *ctx = context;
+	int ret;
+
+	if (!IS_ERR(ctx->clk)) {
+		ret = clk_enable(ctx->clk);
+		if (ret < 0)
+			return ret;
+	}
+
+	*val = ctx->reg_read(ctx, reg);
+
+	if (!IS_ERR(ctx->clk))
+		clk_disable(ctx->clk);
+
+	return 0;
+}
+
+static void regmap_mmio_free_context(void *context)
+{
+	struct regmap_mmio_context *ctx = context;
+
+	if (!IS_ERR(ctx->clk)) {
+		clk_unprepare(ctx->clk);
+		if (!ctx->attached_clk)
+			clk_put(ctx->clk);
+	}
+	kfree(context);
+}
+
+static const struct regmap_bus regmap_mmio = {
+	.fast_io = true,
+	.reg_write = regmap_mmio_write,
+	.reg_read = regmap_mmio_read,
+	.free_context = regmap_mmio_free_context,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+					const char *clk_id,
+					void __iomem *regs,
+					const struct regmap_config *config)
+{
+	struct regmap_mmio_context *ctx;
+	int min_stride;
+	int ret;
+
+	ret = regmap_mmio_regbits_check(config->reg_bits);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (config->pad_bits)
+		return ERR_PTR(-EINVAL);
+
+	min_stride = regmap_mmio_get_min_stride(config->val_bits);
+	if (min_stride < 0)
+		return ERR_PTR(min_stride);
+
+	if (config->reg_stride < min_stride)
+		return ERR_PTR(-EINVAL);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->regs = regs;
+	ctx->val_bytes = config->val_bits / 8;
+	ctx->clk = ERR_PTR(-ENODEV);
+
+	switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
+	case REGMAP_ENDIAN_DEFAULT:
+	case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+	case REGMAP_ENDIAN_NATIVE:
+#endif
+		switch (config->val_bits) {
+		case 8:
+			ctx->reg_read = regmap_mmio_read8;
+			ctx->reg_write = regmap_mmio_write8;
+			break;
+		case 16:
+			ctx->reg_read = regmap_mmio_read16le;
+			ctx->reg_write = regmap_mmio_write16le;
+			break;
+		case 32:
+			ctx->reg_read = regmap_mmio_read32le;
+			ctx->reg_write = regmap_mmio_write32le;
+			break;
+#ifdef CONFIG_64BIT
+		case 64:
+			ctx->reg_read = regmap_mmio_read64le;
+			ctx->reg_write = regmap_mmio_write64le;
+			break;
+#endif
+		default:
+			ret = -EINVAL;
+			goto err_free;
+		}
+		break;
+	case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+	case REGMAP_ENDIAN_NATIVE:
+#endif
+		switch (config->val_bits) {
+		case 8:
+			ctx->reg_read = regmap_mmio_read8;
+			ctx->reg_write = regmap_mmio_write8;
+			break;
+		case 16:
+			ctx->reg_read = regmap_mmio_read16be;
+			ctx->reg_write = regmap_mmio_write16be;
+			break;
+		case 32:
+			ctx->reg_read = regmap_mmio_read32be;
+			ctx->reg_write = regmap_mmio_write32be;
+			break;
+		default:
+			ret = -EINVAL;
+			goto err_free;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	if (clk_id == NULL)
+		return ctx;
+
+	ctx->clk = clk_get(dev, clk_id);
+	if (IS_ERR(ctx->clk)) {
+		ret = PTR_ERR(ctx->clk);
+		goto err_free;
+	}
+
+	ret = clk_prepare(ctx->clk);
+	if (ret < 0) {
+		clk_put(ctx->clk);
+		goto err_free;
+	}
+
+	return ctx;
+
+err_free:
+	kfree(ctx);
+
+	return ERR_PTR(ret);
+}
+
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+				      void __iomem *regs,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	struct regmap_mmio_context *ctx;
+
+	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return __regmap_init(dev, &regmap_mmio, ctx, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+					   const char *clk_id,
+					   void __iomem *regs,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name)
+{
+	struct regmap_mmio_context *ctx;
+
+	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+	struct regmap_mmio_context *ctx = map->bus_context;
+
+	ctx->clk = clk;
+	ctx->attached_clk = true;
+
+	return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+	struct regmap_mmio_context *ctx = map->bus_context;
+
+	clk_unprepare(ctx->clk);
+
+	ctx->attached_clk = false;
+	ctx->clk = NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
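+
+/*
+ * Illustrative usage sketch, not part of this file: a platform driver
+ * probe creating an MMIO regmap with an attached clock.  The register
+ * layout, the "mclk" clock id and all foo_* names are hypothetical.
+ *
+ *	static const struct regmap_config foo_regmap_config = {
+ *		.reg_bits = 32,
+ *		.val_bits = 32,
+ *		.reg_stride = 4,
+ *		.max_register = 0xff,
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct resource *res;
+ *		void __iomem *base;
+ *		struct regmap *map;
+ *
+ *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *		base = devm_ioremap_resource(&pdev->dev, res);
+ *		if (IS_ERR(base))
+ *			return PTR_ERR(base);
+ *
+ *		map = devm_regmap_init_mmio_clk(&pdev->dev, "mclk", base,
+ *						&foo_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */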
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
new file mode 100644
index 0000000..597042e
--- /dev/null
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Register map access API - SCCB support
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "internal.h"
+
+/**
+ * sccb_is_available - Check if the adapter supports SCCB protocol
+ * @adap: I2C adapter
+ *
+ * Return true if the I2C adapter is capable of using SCCB helper functions,
+ * false otherwise.
+ */
+static bool sccb_is_available(struct i2c_adapter *adap)
+{
+	u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+
+	/*
+	 * If we ever want support for hardware doing SCCB natively, we will
+	 * introduce a sccb_xfer() callback to struct i2c_algorithm and check
+	 * for it here.
+	 */
+
+	return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
+}
+
+/**
+ * regmap_sccb_read - Read data from SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * This executes the 2-phase write transmission cycle that is followed by a
+ * 2-phase read transmission cycle, returning negative errno else zero on
+ * success.
+ */
+static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+	union i2c_smbus_data data;
+
+	i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+	ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+			       I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
+	if (ret < 0)
+		goto out;
+
+	ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+			       I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
+	if (ret < 0)
+		goto out;
+
+	*val = data.byte;
+out:
+	i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+	return ret;
+}
+
+/**
+ * regmap_sccb_write - Write data to SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * This executes the SCCB 3-phase write transmission cycle, returning negative
+ * errno else zero on success.
+ */
+static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_sccb_bus = {
+	.reg_write = regmap_sccb_write,
+	.reg_read = regmap_sccb_read,
+};
+
+static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
+					const struct regmap_config *config)
+{
+	if (config->val_bits == 8 && config->reg_bits == 8 &&
+			sccb_is_available(i2c->adapter))
+		return &regmap_sccb_bus;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sccb);
+
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
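+
+/*
+ * Illustrative usage sketch, not part of this file: an I2C camera
+ * sensor driver creating an SCCB regmap.  The 8-bit register/value
+ * layout is what regmap_get_sccb_bus() above requires; the ov_*
+ * names are hypothetical.
+ *
+ *	static const struct regmap_config ov_regmap_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	static int ov_probe(struct i2c_client *client,
+ *			    const struct i2c_device_id *id)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_sccb(client, &ov_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */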
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 0000000..50a6638
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct device *dev = context;
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+	return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct device *dev = context;
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	int read;
+
+	read = sdw_read(slave, reg);
+	if (read < 0)
+		return read;
+
+	*val = read;
+	return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+	.reg_read = regmap_sdw_read,
+	.reg_write = regmap_sdw_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+	/* All register values are 8 bits wide as per the MIPI SoundWire 1.0 spec */
+	if (config->val_bits != 8)
+		return -ENOTSUPP;
+
+	/* Register addresses are 32 bits wide */
+	if (config->reg_bits != 32)
+		return -ENOTSUPP;
+
+	if (config->pad_bits != 0)
+		return -ENOTSUPP;
+
+	return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	int ret;
+
+	ret = regmap_sdw_config_check(config);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return __regmap_init(&sdw->dev, &regmap_sdw,
+			&sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	int ret;
+
+	ret = regmap_sdw_config_check(config);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+			&sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
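+
+/*
+ * Illustrative usage sketch, not part of this file: a SoundWire codec
+ * driver creating a regmap.  The 32-bit register address and 8-bit
+ * value widths are the ones regmap_sdw_config_check() above enforces;
+ * the codec_* names are hypothetical.
+ *
+ *	static const struct regmap_config codec_regmap_config = {
+ *		.reg_bits = 32,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	static int codec_probe(struct sdw_slave *slave,
+ *			       const struct sdw_device_id *id)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_sdw(slave, &codec_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */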
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
new file mode 100644
index 0000000..0968059
--- /dev/null
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Linaro Ltd.
+
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_slimbus_write(void *context, const void *data, size_t count)
+{
+	struct slim_device *sdev = context;
+
+	return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
+}
+
+static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
+			       void *val, size_t val_size)
+{
+	struct slim_device *sdev = context;
+
+	return slim_read(sdev, *(u16 *)reg, val_size, val);
+}
+
+static struct regmap_bus regmap_slimbus_bus = {
+	.write = regmap_slimbus_write,
+	.read = regmap_slimbus_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim,
+					const struct regmap_config *config)
+{
+	if (config->val_bits == 8 && config->reg_bits == 16)
+		return &regmap_slimbus_bus;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+				     const struct regmap_config *config,
+				     struct lock_class_key *lock_key,
+				     const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(&slimbus->dev, bus, slimbus, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
+
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+					  const struct regmap_config *config,
+					  struct lock_class_key *lock_key,
+					  const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(&slimbus->dev, bus, slimbus, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
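+
+/*
+ * Illustrative usage sketch, not part of this file: a SLIMbus codec
+ * driver creating a regmap.  regmap_get_slimbus() above requires
+ * 16-bit register addresses and 8-bit values; the codec_* names are
+ * hypothetical.
+ *
+ *	static const struct regmap_config codec_regmap_config = {
+ *		.reg_bits = 16,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	static int codec_probe(struct slim_device *sdev)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_slimbus(sdev, &codec_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */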
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
new file mode 100644
index 0000000..c7150dd
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi.c
@@ -0,0 +1,136 @@
+/*
+ * Register map access API - SPI support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+struct regmap_async_spi {
+	struct regmap_async core;
+	struct spi_message m;
+	struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+	struct regmap_async_spi *async = data;
+
+	regmap_async_complete_cb(&async->core, async->m.status);
+}
+
+static int regmap_spi_write(void *context, const void *data, size_t count)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+
+	return spi_write(spi, data, count);
+}
+
+static int regmap_spi_gather_write(void *context,
+				   const void *reg, size_t reg_len,
+				   const void *val, size_t val_len)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+	struct spi_message m;
+	struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
+				     { .tx_buf = val, .len = val_len, }, };
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+
+	return spi_sync(spi, &m);
+}
+
+static int regmap_spi_async_write(void *context,
+				  const void *reg, size_t reg_len,
+				  const void *val, size_t val_len,
+				  struct regmap_async *a)
+{
+	struct regmap_async_spi *async = container_of(a,
+						      struct regmap_async_spi,
+						      core);
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+
+	async->t[0].tx_buf = reg;
+	async->t[0].len = reg_len;
+	async->t[1].tx_buf = val;
+	async->t[1].len = val_len;
+
+	spi_message_init(&async->m);
+	spi_message_add_tail(&async->t[0], &async->m);
+	if (val)
+		spi_message_add_tail(&async->t[1], &async->m);
+
+	async->m.complete = regmap_spi_complete;
+	async->m.context = async;
+
+	return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+	struct regmap_async_spi *async_spi;
+
+	async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+	if (!async_spi)
+		return NULL;
+
+	return &async_spi->core;
+}
+
+static int regmap_spi_read(void *context,
+			   const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct spi_device *spi = to_spi_device(dev);
+
+	return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static const struct regmap_bus regmap_spi = {
+	.write = regmap_spi_write,
+	.gather_write = regmap_spi_gather_write,
+	.async_write = regmap_spi_async_write,
+	.async_alloc = regmap_spi_async_alloc,
+	.read = regmap_spi_read,
+	.read_flag_mask = 0x80,
+	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
+
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
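+
+/*
+ * Illustrative usage sketch, not part of this file: an SPI device
+ * driver creating a regmap.  With the bus defaults above, reads are
+ * flagged by setting bit 7 (0x80) of the register address; the foo_*
+ * names and register layout are hypothetical.
+ *
+ *	static const struct regmap_config foo_regmap_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *		.max_register = 0x7f,
+ *	};
+ *
+ *	static int foo_probe(struct spi_device *spi)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_spi(spi, &foo_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */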
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
new file mode 100644
index 0000000..0bfb8ed
--- /dev/null
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -0,0 +1,234 @@
+/*
+ * Register map access API - SPMI support
+ *
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Based on regmap-i2c.c:
+ * Copyright 2011 Wolfson Microelectronics plc
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_base_read(void *context,
+				 const void *reg, size_t reg_size,
+				 void *val, size_t val_size)
+{
+	u8 addr = *(u8 *)reg;
+	int err = 0;
+
+	BUG_ON(reg_size != 1);
+
+	while (val_size-- && !err)
+		err = spmi_register_read(context, addr++, val++);
+
+	return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+					 const void *reg, size_t reg_size,
+					 const void *val, size_t val_size)
+{
+	const u8 *data = val;
+	u8 addr = *(u8 *)reg;
+	int err = 0;
+
+	BUG_ON(reg_size != 1);
+
+	/*
+	 * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+	 * use it when possible.
+	 */
+	if (addr == 0 && val_size) {
+		err = spmi_register_zero_write(context, *data);
+		if (err)
+			goto err_out;
+
+		data++;
+		addr++;
+		val_size--;
+	}
+
+	while (val_size) {
+		err = spmi_register_write(context, addr, *data);
+		if (err)
+			goto err_out;
+
+		data++;
+		addr++;
+		val_size--;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+				  size_t count)
+{
+	BUG_ON(count < 1);
+	return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+					     count - 1);
+}
+
+static const struct regmap_bus regmap_spmi_base = {
+	.read				= regmap_spmi_base_read,
+	.write				= regmap_spmi_base_write,
+	.gather_write			= regmap_spmi_base_gather_write,
+	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+				       const struct regmap_config *config,
+				       struct lock_class_key *lock_key,
+				       const char *lock_name)
+{
+	return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
+
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+					    const struct regmap_config *config,
+					    struct lock_class_key *lock_key,
+					    const char *lock_name)
+{
+	return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
+
+static int regmap_spmi_ext_read(void *context,
+				const void *reg, size_t reg_size,
+				void *val, size_t val_size)
+{
+	int err = 0;
+	size_t len;
+	u16 addr;
+
+	BUG_ON(reg_size != 2);
+
+	addr = *(u16 *)reg;
+
+	/*
+	 * Split accesses in two so that the more bandwidth-efficient
+	 * 'Extended Register Read' command is used for addresses below
+	 * 0x100 where possible, with 'Extended Register Read Long'
+	 * covering the rest.
+	 */
+	while (addr <= 0xFF && val_size) {
+		len = min_t(size_t, val_size, 16);
+
+		err = spmi_ext_register_read(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+	while (val_size) {
+		len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_readl(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_ext_gather_write(void *context,
+					const void *reg, size_t reg_size,
+					const void *val, size_t val_size)
+{
+	int err = 0;
+	size_t len;
+	u16 addr;
+
+	BUG_ON(reg_size != 2);
+
+	addr = *(u16 *)reg;
+
+	while (addr <= 0xFF && val_size) {
+		len = min_t(size_t, val_size, 16);
+
+		err = spmi_ext_register_write(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+	while (val_size) {
+		len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_writel(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_ext_write(void *context, const void *data,
+				 size_t count)
+{
+	BUG_ON(count < 2);
+	return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+					    count - 2);
+}
+
+static const struct regmap_bus regmap_spmi_ext = {
+	.read				= regmap_spmi_ext_read,
+	.write				= regmap_spmi_ext_write,
+	.gather_write			= regmap_spmi_ext_gather_write,
+	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name)
+{
+	return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
+
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+					   const struct regmap_config *config,
+					   struct lock_class_key *lock_key,
+					   const char *lock_name)
+{
+	return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
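+
+/*
+ * Illustrative usage sketch, not part of this file: a PMIC function
+ * driver creating a regmap over the SPMI extended register space.
+ * The 16-bit address / 8-bit value layout matches the 'Extended
+ * Register' commands used above; the pmic_* names are hypothetical.
+ *
+ *	static const struct regmap_config pmic_regmap_config = {
+ *		.reg_bits = 16,
+ *		.val_bits = 8,
+ *		.max_register = 0xffff,
+ *	};
+ *
+ *	static int pmic_probe(struct spmi_device *sdev)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_spmi_ext(sdev, &pmic_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */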
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
new file mode 100644
index 0000000..e6c64b0
--- /dev/null
+++ b/drivers/base/regmap/regmap-w1.c
@@ -0,0 +1,245 @@
+/*
+ * Register map access API - W1 (1-Wire) support
+ *
+ * Copyright (c) 2017 Radioavionica Corporation
+ * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/w1.h>
+
+#include "internal.h"
+
+#define W1_CMD_READ_DATA	0x69
+#define W1_CMD_WRITE_DATA	0x6C
+
+/*
+ * 1-Wire slave registers with 8-bit address and 8-bit data
+ */
+
+static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg);
+		*val = w1_read_8(sl->master);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg);
+		w1_write_8(sl->master, val);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * 1-Wire slave registers with 8-bit address and 16-bit data
+ */
+
+static int w1_reg_a8_v16_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg);
+		*val = w1_read_8(sl->master);
+		*val |= w1_read_8(sl->master)<<8;
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a8_v16_write(void *context, unsigned int reg,
+				unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg);
+		w1_write_8(sl->master, val & 0x00FF);
+		w1_write_8(sl->master, val>>8 & 0x00FF);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * 1-Wire slave registers with 16-bit address and 16-bit data
+ */
+
+static int w1_reg_a16_v16_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 65535)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg & 0x00FF);
+		w1_write_8(sl->master, reg>>8 & 0x00FF);
+		*val = w1_read_8(sl->master);
+		*val |= w1_read_8(sl->master)<<8;
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a16_v16_write(void *context, unsigned int reg,
+				unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 65535)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg & 0x00FF);
+		w1_write_8(sl->master, reg>>8 & 0x00FF);
+		w1_write_8(sl->master, val & 0x00FF);
+		w1_write_8(sl->master, val>>8 & 0x00FF);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * Various types of supported bus addressing
+ */
+
+static struct regmap_bus regmap_w1_bus_a8_v8 = {
+	.reg_read = w1_reg_a8_v8_read,
+	.reg_write = w1_reg_a8_v8_write,
+};
+
+static struct regmap_bus regmap_w1_bus_a8_v16 = {
+	.reg_read = w1_reg_a8_v16_read,
+	.reg_write = w1_reg_a8_v16_write,
+};
+
+static struct regmap_bus regmap_w1_bus_a16_v16 = {
+	.reg_read = w1_reg_a16_v16_read,
+	.reg_write = w1_reg_a16_v16_write,
+};
+
+static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev,
+					const struct regmap_config *config)
+{
+	if (config->reg_bits == 8 && config->val_bits == 8)
+		return &regmap_w1_bus_a8_v8;
+
+	if (config->reg_bits == 8 && config->val_bits == 16)
+		return &regmap_w1_bus_a8_v16;
+
+	if (config->reg_bits == 16 && config->val_bits == 16)
+		return &regmap_w1_bus_a16_v16;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(w1_dev, bus, w1_dev, config,
+			     lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_w1);
+
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+	const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(w1_dev, bus, w1_dev, config,
+				  lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
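+
+/*
+ * Illustrative usage sketch, not part of this file: a 1-Wire slave
+ * driver creating a regmap with 8-bit addresses and 16-bit values,
+ * one of the three layouts supported above.  The foo_* names are
+ * hypothetical.
+ *
+ *	static const struct regmap_config foo_regmap_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 16,
+ *	};
+ *
+ *	static int foo_add_slave(struct w1_slave *sl)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_w1(&sl->dev, &foo_regmap_config);
+ *		return PTR_ERR_OR_ZERO(map);
+ *	}
+ */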
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
new file mode 100644
index 0000000..0360a90
--- /dev/null
+++ b/drivers/base/regmap/regmap.c
@@ -0,0 +1,3039 @@
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/hwspinlock.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include "internal.h"
+
+/*
+ * Sometimes for failures during very early init the trace
+ * infrastructure isn't available early enough to be used.  For this
+ * sort of problem defining LOG_DEVICE will add printks for basic
+ * register I/O on a specific device.
+ */
+#undef LOG_DEVICE
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+			       unsigned int mask, unsigned int val,
+			       bool *change, bool force_write);
+
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val);
+static int _regmap_bus_read(void *context, unsigned int reg,
+			    unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+				       unsigned int val);
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+				 unsigned int val);
+
+bool regmap_reg_in_ranges(unsigned int reg,
+			  const struct regmap_range *ranges,
+			  unsigned int nranges)
+{
+	const struct regmap_range *r;
+	int i;
+
+	for (i = 0, r = ranges; i < nranges; i++, r++)
+		if (regmap_reg_in_range(reg, r))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
+
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+			      const struct regmap_access_table *table)
+{
+	/* Check "no ranges" first */
+	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
+		return false;
+
+	/* In case zero "yes ranges" are supplied, any reg is OK */
+	if (!table->n_yes_ranges)
+		return true;
+
+	return regmap_reg_in_ranges(reg, table->yes_ranges,
+				    table->n_yes_ranges);
+}
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
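+
+/*
+ * Illustrative sketch, not part of this file, of the table semantics
+ * checked above: registers 0x00-0x0f are readable except for 0x08,
+ * which the no_ranges entry vetoes.  The foo_* names are hypothetical.
+ *
+ *	static const struct regmap_range foo_yes_ranges[] = {
+ *		regmap_reg_range(0x00, 0x0f),
+ *	};
+ *
+ *	static const struct regmap_range foo_no_ranges[] = {
+ *		regmap_reg_range(0x08, 0x08),
+ *	};
+ *
+ *	static const struct regmap_access_table foo_rd_table = {
+ *		.yes_ranges = foo_yes_ranges,
+ *		.n_yes_ranges = ARRAY_SIZE(foo_yes_ranges),
+ *		.no_ranges = foo_no_ranges,
+ *		.n_no_ranges = ARRAY_SIZE(foo_no_ranges),
+ *	};
+ */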
+
+bool regmap_writeable(struct regmap *map, unsigned int reg)
+{
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->writeable_reg)
+		return map->writeable_reg(map->dev, reg);
+
+	if (map->wr_table)
+		return regmap_check_range_table(map, reg, map->wr_table);
+
+	return true;
+}
+
+bool regmap_cached(struct regmap *map, unsigned int reg)
+{
+	int ret;
+	unsigned int val;
+
+	if (map->cache_type == REGCACHE_NONE)
+		return false;
+
+	if (!map->cache_ops)
+		return false;
+
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	map->lock(map->lock_arg);
+	ret = regcache_read(map, reg, &val);
+	map->unlock(map->lock_arg);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+bool regmap_readable(struct regmap *map, unsigned int reg)
+{
+	if (!map->reg_read)
+		return false;
+
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->format.format_write)
+		return false;
+
+	if (map->readable_reg)
+		return map->readable_reg(map->dev, reg);
+
+	if (map->rd_table)
+		return regmap_check_range_table(map, reg, map->rd_table);
+
+	return true;
+}
+
+bool regmap_volatile(struct regmap *map, unsigned int reg)
+{
+	if (!map->format.format_write && !regmap_readable(map, reg))
+		return false;
+
+	if (map->volatile_reg)
+		return map->volatile_reg(map->dev, reg);
+
+	if (map->volatile_table)
+		return regmap_check_range_table(map, reg, map->volatile_table);
+
+	if (map->cache_ops)
+		return false;
+	else
+		return true;
+}
+
+bool regmap_precious(struct regmap *map, unsigned int reg)
+{
+	if (!regmap_readable(map, reg))
+		return false;
+
+	if (map->precious_reg)
+		return map->precious_reg(map->dev, reg);
+
+	if (map->precious_table)
+		return regmap_check_range_table(map, reg, map->precious_table);
+
+	return false;
+}
+
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
+{
+	if (map->readable_noinc_reg)
+		return map->readable_noinc_reg(map->dev, reg);
+
+	if (map->rd_noinc_table)
+		return regmap_check_range_table(map, reg, map->rd_noinc_table);
+
+	return true;
+}
+
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+	size_t num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++)
+		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
+			return false;
+
+	return true;
+}
+
+static void regmap_format_2_6_write(struct regmap *map,
+				     unsigned int reg, unsigned int val)
+{
+	u8 *out = map->work_buf;
+
+	*out = (reg << 6) | val;
+}
+
+static void regmap_format_4_12_write(struct regmap *map,
+				     unsigned int reg, unsigned int val)
+{
+	__be16 *out = map->work_buf;
+	*out = cpu_to_be16((reg << 12) | val);
+}
+
+static void regmap_format_7_9_write(struct regmap *map,
+				    unsigned int reg, unsigned int val)
+{
+	__be16 *out = map->work_buf;
+	*out = cpu_to_be16((reg << 9) | val);
+}
+
+static void regmap_format_10_14_write(struct regmap *map,
+				    unsigned int reg, unsigned int val)
+{
+	u8 *out = map->work_buf;
+
+	out[2] = val;
+	out[1] = (val >> 8) | (reg << 6);
+	out[0] = reg >> 2;
+}
+
+static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
+{
+	u8 *b = buf;
+
+	b[0] = val << shift;
+}
+
+static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
+{
+	__be16 *b = buf;
+
+	b[0] = cpu_to_be16(val << shift);
+}
+
+static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le16 *b = buf;
+
+	b[0] = cpu_to_le16(val << shift);
+}
+
+static void regmap_format_16_native(void *buf, unsigned int val,
+				    unsigned int shift)
+{
+	*(u16 *)buf = val << shift;
+}
+
+static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+{
+	u8 *b = buf;
+
+	val <<= shift;
+
+	b[0] = val >> 16;
+	b[1] = val >> 8;
+	b[2] = val;
+}
+
+static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
+{
+	__be32 *b = buf;
+
+	b[0] = cpu_to_be32(val << shift);
+}
+
+static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le32 *b = buf;
+
+	b[0] = cpu_to_le32(val << shift);
+}
+
+static void regmap_format_32_native(void *buf, unsigned int val,
+				    unsigned int shift)
+{
+	*(u32 *)buf = val << shift;
+}
+
+#ifdef CONFIG_64BIT
+static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
+{
+	__be64 *b = buf;
+
+	b[0] = cpu_to_be64((u64)val << shift);
+}
+
+static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le64 *b = buf;
+
+	b[0] = cpu_to_le64((u64)val << shift);
+}
+
+static void regmap_format_64_native(void *buf, unsigned int val,
+				    unsigned int shift)
+{
+	*(u64 *)buf = (u64)val << shift;
+}
+#endif
+
+static void regmap_parse_inplace_noop(void *buf)
+{
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+	const u8 *b = buf;
+
+	return b[0];
+}
+
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+	const __be16 *b = buf;
+
+	return be16_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_16_le(const void *buf)
+{
+	const __le16 *b = buf;
+
+	return le16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
+{
+	__be16 *b = buf;
+
+	b[0] = be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_le_inplace(void *buf)
+{
+	__le16 *b = buf;
+
+	b[0] = le16_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_16_native(const void *buf)
+{
+	return *(u16 *)buf;
+}
+
+static unsigned int regmap_parse_24(const void *buf)
+{
+	const u8 *b = buf;
+	unsigned int ret = b[2];
+	ret |= ((unsigned int)b[1]) << 8;
+	ret |= ((unsigned int)b[0]) << 16;
+
+	return ret;
+}
+
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+	const __be32 *b = buf;
+
+	return be32_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_32_le(const void *buf)
+{
+	const __le32 *b = buf;
+
+	return le32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
+{
+	__be32 *b = buf;
+
+	b[0] = be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_le_inplace(void *buf)
+{
+	__le32 *b = buf;
+
+	b[0] = le32_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_32_native(const void *buf)
+{
+	return *(u32 *)buf;
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_parse_64_be(const void *buf)
+{
+	const __be64 *b = buf;
+
+	return be64_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_64_le(const void *buf)
+{
+	const __le64 *b = buf;
+
+	return le64_to_cpu(b[0]);
+}
+
+static void regmap_parse_64_be_inplace(void *buf)
+{
+	__be64 *b = buf;
+
+	b[0] = be64_to_cpu(b[0]);
+}
+
+static void regmap_parse_64_le_inplace(void *buf)
+{
+	__le64 *b = buf;
+
+	b[0] = le64_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_64_native(const void *buf)
+{
+	return *(u64 *)buf;
+}
+#endif
+
+static void regmap_lock_hwlock(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+				    &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+
+static void regmap_lock_unlock_none(void *__map)
+{
+}
+
+static void regmap_lock_mutex(void *__map)
+{
+	struct regmap *map = __map;
+	mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(void *__map)
+{
+	struct regmap *map = __map;
+	mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(void *__map)
+__acquires(&map->spinlock)
+{
+	struct regmap *map = __map;
+	unsigned long flags;
+
+	spin_lock_irqsave(&map->spinlock, flags);
+	map->spinlock_flags = flags;
+}
+
+static void regmap_unlock_spinlock(void *__map)
+__releases(&map->spinlock)
+{
+	struct regmap *map = __map;
+	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
+}
+
+static void dev_get_regmap_release(struct device *dev, void *res)
+{
+	/*
+	 * We don't actually have anything to do here; the goal here
+	 * is not to manage the regmap but to provide a simple way to
+	 * get the regmap back given a struct device.
+	 */
+}
+
+static bool _regmap_range_add(struct regmap *map,
+			      struct regmap_range_node *data)
+{
+	struct rb_root *root = &map->range_tree;
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct regmap_range_node *this =
+			rb_entry(*new, struct regmap_range_node, node);
+
+		parent = *new;
+		if (data->range_max < this->range_min)
+			new = &((*new)->rb_left);
+		else if (data->range_min > this->range_max)
+			new = &((*new)->rb_right);
+		else
+			return false;
+	}
+
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+
+	return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+						      unsigned int reg)
+{
+	struct rb_node *node = map->range_tree.rb_node;
+
+	while (node) {
+		struct regmap_range_node *this =
+			rb_entry(node, struct regmap_range_node, node);
+
+		if (reg < this->range_min)
+			node = node->rb_left;
+		else if (reg > this->range_max)
+			node = node->rb_right;
+		else
+			return this;
+	}
+
+	return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+	struct rb_node *next;
+	struct regmap_range_node *range_node;
+
+	next = rb_first(&map->range_tree);
+	while (next) {
+		range_node = rb_entry(next, struct regmap_range_node, node);
+		next = rb_next(&range_node->node);
+		rb_erase(&range_node->node, &map->range_tree);
+		kfree(range_node);
+	}
+
+	kfree(map->selector_work_buf);
+}
+
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+		      const struct regmap_config *config)
+{
+	struct regmap **m;
+
+	map->dev = dev;
+
+	regmap_debugfs_init(map, config->name);
+
+	/* Add a devres resource for dev_get_regmap() */
+	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+	if (!m) {
+		regmap_debugfs_exit(map);
+		return -ENOMEM;
+	}
+	*m = map;
+	devres_add(dev, m);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
+static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
+					const struct regmap_config *config)
+{
+	enum regmap_endian endian;
+
+	/* Retrieve the endianness specification from the regmap config */
+	endian = config->reg_format_endian;
+
+	/* If the regmap config specified a non-default value, use that */
+	if (endian != REGMAP_ENDIAN_DEFAULT)
+		return endian;
+
+	/* Retrieve the endianness specification from the bus config */
+	if (bus && bus->reg_format_endian_default)
+		endian = bus->reg_format_endian_default;
+
+	/* If the bus specified a non-default value, use that */
+	if (endian != REGMAP_ENDIAN_DEFAULT)
+		return endian;
+
+	/* Use this if no other value was found */
+	return REGMAP_ENDIAN_BIG;
+}
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+					 const struct regmap_bus *bus,
+					 const struct regmap_config *config)
+{
+	struct device_node *np;
+	enum regmap_endian endian;
+
+	/* Retrieve the endianness specification from the regmap config */
+	endian = config->val_format_endian;
+
+	/* If the regmap config specified a non-default value, use that */
+	if (endian != REGMAP_ENDIAN_DEFAULT)
+		return endian;
+
+	/* If the dev and dev->of_node exist try to get endianness from DT */
+	if (dev && dev->of_node) {
+		np = dev->of_node;
+
+		/* Parse the device's DT node for an endianness specification */
+		if (of_property_read_bool(np, "big-endian"))
+			endian = REGMAP_ENDIAN_BIG;
+		else if (of_property_read_bool(np, "little-endian"))
+			endian = REGMAP_ENDIAN_LITTLE;
+		else if (of_property_read_bool(np, "native-endian"))
+			endian = REGMAP_ENDIAN_NATIVE;
+
+		/* If the endianness was specified in DT, use that */
+		if (endian != REGMAP_ENDIAN_DEFAULT)
+			return endian;
+	}
+
+	/* Retrieve the endianness specification from the bus config */
+	if (bus && bus->val_format_endian_default)
+		endian = bus->val_format_endian_default;
+
+	/* If the bus specified a non-default value, use that */
+	if (endian != REGMAP_ENDIAN_DEFAULT)
+		return endian;
+
+	/* Use this if no other value was found */
+	return REGMAP_ENDIAN_BIG;
+}
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
+
+struct regmap *__regmap_init(struct device *dev,
+			     const struct regmap_bus *bus,
+			     void *bus_context,
+			     const struct regmap_config *config,
+			     struct lock_class_key *lock_key,
+			     const char *lock_name)
+{
+	struct regmap *map;
+	int ret = -EINVAL;
+	enum regmap_endian reg_endian, val_endian;
+	int i, j;
+
+	if (!config)
+		goto err;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (config->name) {
+		map->name = kstrdup_const(config->name, GFP_KERNEL);
+		if (!map->name) {
+			ret = -ENOMEM;
+			goto err_map;
+		}
+	}
+
+	if (config->disable_locking) {
+		map->lock = map->unlock = regmap_lock_unlock_none;
+		regmap_debugfs_disable(map);
+	} else if (config->lock && config->unlock) {
+		map->lock = config->lock;
+		map->unlock = config->unlock;
+		map->lock_arg = config->lock_arg;
+	} else if (config->use_hwlock) {
+		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+		if (!map->hwlock) {
+			ret = -ENXIO;
+			goto err_name;
+		}
+
+		switch (config->hwlock_mode) {
+		case HWLOCK_IRQSTATE:
+			map->lock = regmap_lock_hwlock_irqsave;
+			map->unlock = regmap_unlock_hwlock_irqrestore;
+			break;
+		case HWLOCK_IRQ:
+			map->lock = regmap_lock_hwlock_irq;
+			map->unlock = regmap_unlock_hwlock_irq;
+			break;
+		default:
+			map->lock = regmap_lock_hwlock;
+			map->unlock = regmap_unlock_hwlock;
+			break;
+		}
+
+		map->lock_arg = map;
+	} else {
+		if ((bus && bus->fast_io) ||
+		    config->fast_io) {
+			spin_lock_init(&map->spinlock);
+			map->lock = regmap_lock_spinlock;
+			map->unlock = regmap_unlock_spinlock;
+			lockdep_set_class_and_name(&map->spinlock,
+						   lock_key, lock_name);
+		} else {
+			mutex_init(&map->mutex);
+			map->lock = regmap_lock_mutex;
+			map->unlock = regmap_unlock_mutex;
+			lockdep_set_class_and_name(&map->mutex,
+						   lock_key, lock_name);
+		}
+		map->lock_arg = map;
+	}
+
+	/*
+	 * When we write in fast-paths with regmap_bulk_write() don't allocate
+	 * scratch buffers with sleeping allocations.
+	 */
+	if ((bus && bus->fast_io) || config->fast_io)
+		map->alloc_flags = GFP_ATOMIC;
+	else
+		map->alloc_flags = GFP_KERNEL;
+
+	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
+	map->format.pad_bytes = config->pad_bits / 8;
+	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
+	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+			config->val_bits + config->pad_bits, 8);
+	map->reg_shift = config->pad_bits % 8;
+	if (config->reg_stride)
+		map->reg_stride = config->reg_stride;
+	else
+		map->reg_stride = 1;
+	if (is_power_of_2(map->reg_stride))
+		map->reg_stride_order = ilog2(map->reg_stride);
+	else
+		map->reg_stride_order = -1;
+	map->use_single_read = config->use_single_rw || !bus || !bus->read;
+	map->use_single_write = config->use_single_rw || !bus || !bus->write;
+	map->can_multi_write = config->can_multi_write && bus && bus->write;
+	if (bus) {
+		map->max_raw_read = bus->max_raw_read;
+		map->max_raw_write = bus->max_raw_write;
+	}
+	map->dev = dev;
+	map->bus = bus;
+	map->bus_context = bus_context;
+	map->max_register = config->max_register;
+	map->wr_table = config->wr_table;
+	map->rd_table = config->rd_table;
+	map->volatile_table = config->volatile_table;
+	map->precious_table = config->precious_table;
+	map->rd_noinc_table = config->rd_noinc_table;
+	map->writeable_reg = config->writeable_reg;
+	map->readable_reg = config->readable_reg;
+	map->volatile_reg = config->volatile_reg;
+	map->precious_reg = config->precious_reg;
+	map->readable_noinc_reg = config->readable_noinc_reg;
+	map->cache_type = config->cache_type;
+
+	spin_lock_init(&map->async_lock);
+	INIT_LIST_HEAD(&map->async_list);
+	INIT_LIST_HEAD(&map->async_free);
+	init_waitqueue_head(&map->async_waitq);
+
+	if (config->read_flag_mask ||
+	    config->write_flag_mask ||
+	    config->zero_flag_mask) {
+		map->read_flag_mask = config->read_flag_mask;
+		map->write_flag_mask = config->write_flag_mask;
+	} else if (bus) {
+		map->read_flag_mask = bus->read_flag_mask;
+	}
+
+	if (!bus) {
+		map->reg_read  = config->reg_read;
+		map->reg_write = config->reg_write;
+
+		map->defer_caching = false;
+		goto skip_format_initialization;
+	} else if (!bus->read || !bus->write) {
+		map->reg_read = _regmap_bus_reg_read;
+		map->reg_write = _regmap_bus_reg_write;
+
+		map->defer_caching = false;
+		goto skip_format_initialization;
+	} else {
+		map->reg_read  = _regmap_bus_read;
+		map->reg_update_bits = bus->reg_update_bits;
+	}
+
+	reg_endian = regmap_get_reg_endian(bus, config);
+	val_endian = regmap_get_val_endian(dev, bus, config);
+
+	switch (config->reg_bits + map->reg_shift) {
+	case 2:
+		switch (config->val_bits) {
+		case 6:
+			map->format.format_write = regmap_format_2_6_write;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+	case 4:
+		switch (config->val_bits) {
+		case 12:
+			map->format.format_write = regmap_format_4_12_write;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+	case 7:
+		switch (config->val_bits) {
+		case 9:
+			map->format.format_write = regmap_format_7_9_write;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+	case 10:
+		switch (config->val_bits) {
+		case 14:
+			map->format.format_write = regmap_format_10_14_write;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+	case 8:
+		map->format.format_reg = regmap_format_8;
+		break;
+
+	case 16:
+		switch (reg_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_reg = regmap_format_16_be;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_reg = regmap_format_16_le;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_reg = regmap_format_16_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+	case 24:
+		if (reg_endian != REGMAP_ENDIAN_BIG)
+			goto err_hwlock;
+		map->format.format_reg = regmap_format_24;
+		break;
+
+	case 32:
+		switch (reg_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_reg = regmap_format_32_be;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_reg = regmap_format_32_le;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_reg = regmap_format_32_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+
+#ifdef CONFIG_64BIT
+	case 64:
+		switch (reg_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_reg = regmap_format_64_be;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_reg = regmap_format_64_le;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_reg = regmap_format_64_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+#endif
+
+	default:
+		goto err_hwlock;
+	}
+
+	if (val_endian == REGMAP_ENDIAN_NATIVE)
+		map->format.parse_inplace = regmap_parse_inplace_noop;
+
+	switch (config->val_bits) {
+	case 8:
+		map->format.format_val = regmap_format_8;
+		map->format.parse_val = regmap_parse_8;
+		map->format.parse_inplace = regmap_parse_inplace_noop;
+		break;
+	case 16:
+		switch (val_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_val = regmap_format_16_be;
+			map->format.parse_val = regmap_parse_16_be;
+			map->format.parse_inplace = regmap_parse_16_be_inplace;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_16_le;
+			map->format.parse_val = regmap_parse_16_le;
+			map->format.parse_inplace = regmap_parse_16_le_inplace;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_val = regmap_format_16_native;
+			map->format.parse_val = regmap_parse_16_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+	case 24:
+		if (val_endian != REGMAP_ENDIAN_BIG)
+			goto err_hwlock;
+		map->format.format_val = regmap_format_24;
+		map->format.parse_val = regmap_parse_24;
+		break;
+	case 32:
+		switch (val_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_val = regmap_format_32_be;
+			map->format.parse_val = regmap_parse_32_be;
+			map->format.parse_inplace = regmap_parse_32_be_inplace;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_32_le;
+			map->format.parse_val = regmap_parse_32_le;
+			map->format.parse_inplace = regmap_parse_32_le_inplace;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_val = regmap_format_32_native;
+			map->format.parse_val = regmap_parse_32_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+#ifdef CONFIG_64BIT
+	case 64:
+		switch (val_endian) {
+		case REGMAP_ENDIAN_BIG:
+			map->format.format_val = regmap_format_64_be;
+			map->format.parse_val = regmap_parse_64_be;
+			map->format.parse_inplace = regmap_parse_64_be_inplace;
+			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_64_le;
+			map->format.parse_val = regmap_parse_64_le;
+			map->format.parse_inplace = regmap_parse_64_le_inplace;
+			break;
+		case REGMAP_ENDIAN_NATIVE:
+			map->format.format_val = regmap_format_64_native;
+			map->format.parse_val = regmap_parse_64_native;
+			break;
+		default:
+			goto err_hwlock;
+		}
+		break;
+#endif
+	}
+
+	if (map->format.format_write) {
+		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
+		    (val_endian != REGMAP_ENDIAN_BIG))
+			goto err_hwlock;
+		map->use_single_write = true;
+	}
+
+	if (!map->format.format_write &&
+	    !(map->format.format_reg && map->format.format_val))
+		goto err_hwlock;
+
+	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
+	if (map->work_buf == NULL) {
+		ret = -ENOMEM;
+		goto err_hwlock;
+	}
+
+	if (map->format.format_write) {
+		map->defer_caching = false;
+		map->reg_write = _regmap_bus_formatted_write;
+	} else if (map->format.format_val) {
+		map->defer_caching = true;
+		map->reg_write = _regmap_bus_raw_write;
+	}
+
+skip_format_initialization:
+
+	map->range_tree = RB_ROOT;
+	for (i = 0; i < config->num_ranges; i++) {
+		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+		struct regmap_range_node *new;
+
+		/* Sanity check */
+		if (range_cfg->range_max < range_cfg->range_min) {
+			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+				range_cfg->range_max, range_cfg->range_min);
+			goto err_range;
+		}
+
+		if (range_cfg->range_max > map->max_register) {
+			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+				range_cfg->range_max, map->max_register);
+			goto err_range;
+		}
+
+		if (range_cfg->selector_reg > map->max_register) {
+			dev_err(map->dev,
+				"Invalid range %d: selector out of map\n", i);
+			goto err_range;
+		}
+
+		if (range_cfg->window_len == 0) {
+			dev_err(map->dev, "Invalid range %d: window_len 0\n",
+				i);
+			goto err_range;
+		}
+
+		/*
+		 * Make sure that this register range has no selector
+		 * or data window within its boundary.
+		 */
+		for (j = 0; j < config->num_ranges; j++) {
+			unsigned sel_reg = config->ranges[j].selector_reg;
+			unsigned win_min = config->ranges[j].window_start;
+			unsigned win_max = win_min +
+					   config->ranges[j].window_len - 1;
+
+			/* Allow data window inside its own virtual range */
+			if (j == i)
+				continue;
+
+			if (range_cfg->range_min <= sel_reg &&
+			    sel_reg <= range_cfg->range_max) {
+				dev_err(map->dev,
+					"Range %d: selector for %d in window\n",
+					i, j);
+				goto err_range;
+			}
+
+			if (!(win_max < range_cfg->range_min ||
+			      win_min > range_cfg->range_max)) {
+				dev_err(map->dev,
+					"Range %d: window for %d in window\n",
+					i, j);
+				goto err_range;
+			}
+		}
+
+		new = kzalloc(sizeof(*new), GFP_KERNEL);
+		if (new == NULL) {
+			ret = -ENOMEM;
+			goto err_range;
+		}
+
+		new->map = map;
+		new->name = range_cfg->name;
+		new->range_min = range_cfg->range_min;
+		new->range_max = range_cfg->range_max;
+		new->selector_reg = range_cfg->selector_reg;
+		new->selector_mask = range_cfg->selector_mask;
+		new->selector_shift = range_cfg->selector_shift;
+		new->window_start = range_cfg->window_start;
+		new->window_len = range_cfg->window_len;
+
+		if (!_regmap_range_add(map, new)) {
+			dev_err(map->dev, "Failed to add range %d\n", i);
+			kfree(new);
+			goto err_range;
+		}
+
+		if (map->selector_work_buf == NULL) {
+			map->selector_work_buf =
+				kzalloc(map->format.buf_size, GFP_KERNEL);
+			if (map->selector_work_buf == NULL) {
+				ret = -ENOMEM;
+				goto err_range;
+			}
+		}
+	}
+
+	ret = regcache_init(map, config);
+	if (ret != 0)
+		goto err_range;
+
+	if (dev) {
+		ret = regmap_attach_dev(dev, map, config);
+		if (ret != 0)
+			goto err_regcache;
+	} else {
+		regmap_debugfs_init(map, config->name);
+	}
+
+	return map;
+
+err_regcache:
+	regcache_exit(map);
+err_range:
+	regmap_range_exit(map);
+	kfree(map->work_buf);
+err_hwlock:
+	if (map->hwlock)
+		hwspin_lock_free(map->hwlock);
+err_name:
+	kfree_const(map->name);
+err_map:
+	kfree(map);
+err:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__regmap_init);
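+
+/*
+ * Illustrative sketch, not part of this file, of the indirect-access
+ * window validated in the range setup above: registers 0x100-0x1ff
+ * are reached through a page selector at register 0x00 and a 16
+ * register data window starting at 0x10.  All foo_* names and
+ * offsets are hypothetical.
+ *
+ *	static const struct regmap_range_cfg foo_ranges[] = {
+ *		{
+ *			.name = "pages",
+ *			.range_min = 0x100,
+ *			.range_max = 0x1ff,
+ *			.selector_reg = 0x00,
+ *			.selector_mask = 0x0f,
+ *			.selector_shift = 0,
+ *			.window_start = 0x10,
+ *			.window_len = 0x10,
+ *		},
+ *	};
+ *
+ * hooked into the regmap_config with:
+ *
+ *	.ranges = foo_ranges,
+ *	.num_ranges = ARRAY_SIZE(foo_ranges),
+ *	.max_register = 0x1ff,
+ */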
+
+static void devm_regmap_release(struct device *dev, void *res)
+{
+	regmap_exit(*(struct regmap **)res);
+}
+
+struct regmap *__devm_regmap_init(struct device *dev,
+				  const struct regmap_bus *bus,
+				  void *bus_context,
+				  const struct regmap_config *config,
+				  struct lock_class_key *lock_key,
+				  const char *lock_name)
+{
+	struct regmap **ptr, *regmap;
+
+	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	regmap = __regmap_init(dev, bus, bus_context, config,
+			       lock_key, lock_name);
+	if (!IS_ERR(regmap)) {
+		*ptr = regmap;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return regmap;
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
+
+static void regmap_field_init(struct regmap_field *rm_field,
+	struct regmap *regmap, struct reg_field reg_field)
+{
+	rm_field->regmap = regmap;
+	rm_field->reg = reg_field.reg;
+	rm_field->shift = reg_field.lsb;
+	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
+	rm_field->id_size = reg_field.id_size;
+	rm_field->id_offset = reg_field.id_offset;
+}
+
+/**
+ * devm_regmap_field_alloc() - Allocate and initialise a register field.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field will be automatically freed
+ * by the device management code.
+ */
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+		struct regmap *regmap, struct reg_field reg_field)
+{
+	struct regmap_field *rm_field = devm_kzalloc(dev,
+					sizeof(*rm_field), GFP_KERNEL);
+	if (!rm_field)
+		return ERR_PTR(-ENOMEM);
+
+	regmap_field_init(rm_field, regmap, reg_field);
+
+	return rm_field;
+
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
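+
+/*
+ * Illustrative sketch (not part of the original source): how a driver
+ * might allocate and use a managed register field.  The register
+ * address and bit positions below are hypothetical.
+ *
+ *	struct reg_field ctrl = REG_FIELD(0x10, 4, 7);
+ *	struct regmap_field *field;
+ *
+ *	field = devm_regmap_field_alloc(dev, regmap, ctrl);
+ *	if (IS_ERR(field))
+ *		return PTR_ERR(field);
+ *
+ *	return regmap_field_write(field, 0x3);
+ */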
+
+/**
+ * devm_regmap_field_free() - Free a register field allocated using
+ *                            devm_regmap_field_alloc.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per the device-driver life-cycle.
+ */
+void devm_regmap_field_free(struct device *dev,
+	struct regmap_field *field)
+{
+	devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_free);
+
+/**
+ * regmap_field_alloc() - Allocate and initialise a register field.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field should be freed by the
+ * user once it has finished working with it, using regmap_field_free().
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+		struct reg_field reg_field)
+{
+	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
+
+	if (!rm_field)
+		return ERR_PTR(-ENOMEM);
+
+	regmap_field_init(rm_field, regmap, reg_field);
+
+	return rm_field;
+}
+EXPORT_SYMBOL_GPL(regmap_field_alloc);
+
+/**
+ * regmap_field_free() - Free register field allocated using
+ *                       regmap_field_alloc.
+ *
+ * @field: regmap field which should be freed.
+ */
+void regmap_field_free(struct regmap_field *field)
+{
+	kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_free);
+
+/**
+ * regmap_reinit_cache() - Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration.  Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache.  This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ *
+ * No explicit locking is done here, the user needs to ensure that
+ * this function will not race with other calls to regmap.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+	regcache_exit(map);
+	regmap_debugfs_exit(map);
+
+	map->max_register = config->max_register;
+	map->writeable_reg = config->writeable_reg;
+	map->readable_reg = config->readable_reg;
+	map->volatile_reg = config->volatile_reg;
+	map->precious_reg = config->precious_reg;
+	map->readable_noinc_reg = config->readable_noinc_reg;
+	map->cache_type = config->cache_type;
+
+	regmap_debugfs_init(map, config->name);
+
+	map->cache_bypass = false;
+	map->cache_only = false;
+
+	return regcache_init(map, config);
+}
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
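+
+/*
+ * Illustrative sketch (not part of the original source): reinitialising
+ * the cache after runtime discovery of the hardware, e.g. once a chip
+ * ID register reveals a larger register map.  The values below are
+ * hypothetical, and the caller must ensure no concurrent regmap use.
+ *
+ *	struct regmap_config new_cfg = *old_cfg;
+ *
+ *	new_cfg.max_register = 0x1ff;
+ *	new_cfg.cache_type = REGCACHE_RBTREE;
+ *
+ *	ret = regmap_reinit_cache(map, &new_cfg);
+ */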
+
+/**
+ * regmap_exit() - Free a previously allocated register map
+ *
+ * @map: Register map to operate on.
+ */
+void regmap_exit(struct regmap *map)
+{
+	struct regmap_async *async;
+
+	regcache_exit(map);
+	regmap_debugfs_exit(map);
+	regmap_range_exit(map);
+	if (map->bus && map->bus->free_context)
+		map->bus->free_context(map->bus_context);
+	kfree(map->work_buf);
+	while (!list_empty(&map->async_free)) {
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		list_del(&async->list);
+		kfree(async->work_buf);
+		kfree(async);
+	}
+	if (map->hwlock)
+		hwspin_lock_free(map->hwlock);
+	kfree_const(map->name);
+	kfree(map);
+}
+EXPORT_SYMBOL_GPL(regmap_exit);
+
+static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+{
+	struct regmap **r = res;
+	if (!r || !*r) {
+		WARN_ON(!r || !*r);
+		return 0;
+	}
+
+	/* If the user didn't specify a name, match any */
+	if (data)
+		return (*r)->name == data;
+	else
+		return 1;
+}
+
+/**
+ * dev_get_regmap() - Obtain the regmap (if any) for a device
+ *
+ * @dev: Device to retrieve the map for
+ * @name: Optional name for the register map, usually NULL.
+ *
+ * Returns the regmap for the device if one is present, or NULL.  If
+ * name is specified then it must match the name specified when
+ * registering the device; if it is NULL then the first regmap found
+ * will be used.  Devices with multiple register maps are very rare;
+ * generic code should normally not need to specify a name.
+ */
+struct regmap *dev_get_regmap(struct device *dev, const char *name)
+{
+	struct regmap **r = devres_find(dev, dev_get_regmap_release,
+					dev_get_regmap_match, (void *)name);
+
+	if (!r)
+		return NULL;
+	return *r;
+}
+EXPORT_SYMBOL_GPL(dev_get_regmap);
+
+/**
+ * regmap_get_device() - Obtain the device from a regmap
+ *
+ * @map: Register map to operate on.
+ *
+ * Returns the underlying device that the regmap has been created for.
+ */
+struct device *regmap_get_device(struct regmap *map)
+{
+	return map->dev;
+}
+EXPORT_SYMBOL_GPL(regmap_get_device);
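+
+/*
+ * Illustrative sketch (not part of the original source): a child device
+ * such as an MFD cell obtaining the regmap registered by its parent.
+ *
+ *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
+ *
+ *	if (!map)
+ *		return -ENODEV;
+ *	dev_info(regmap_get_device(map), "got parent regmap\n");
+ */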
+
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+			       struct regmap_range_node *range,
+			       unsigned int val_num)
+{
+	void *orig_work_buf;
+	unsigned int win_offset;
+	unsigned int win_page;
+	bool page_chg;
+	int ret;
+
+	win_offset = (*reg - range->range_min) % range->window_len;
+	win_page = (*reg - range->range_min) / range->window_len;
+
+	if (val_num > 1) {
+		/* Bulk write shouldn't cross range boundary */
+		if (*reg + val_num - 1 > range->range_max)
+			return -EINVAL;
+
+		/* ... or single page boundary */
+		if (val_num > range->window_len - win_offset)
+			return -EINVAL;
+	}
+
+	/* It is possible to have the selector register inside the data
+	   window.  In that case, the selector register is present on
+	   every page and needs no page switching when accessed alone. */
+	if (val_num > 1 ||
+	    range->window_start + win_offset != range->selector_reg) {
+		/* Use separate work_buf during page switching */
+		orig_work_buf = map->work_buf;
+		map->work_buf = map->selector_work_buf;
+
+		ret = _regmap_update_bits(map, range->selector_reg,
+					  range->selector_mask,
+					  win_page << range->selector_shift,
+					  &page_chg, false);
+
+		map->work_buf = orig_work_buf;
+
+		if (ret != 0)
+			return ret;
+	}
+
+	*reg = range->window_start + win_offset;
+
+	return 0;
+}
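+
+/*
+ * Worked example of the paging arithmetic above, with hypothetical
+ * numbers: for range_min = 0x100, window_len = 0x40 and *reg = 0x1a5,
+ * win_offset = (0x1a5 - 0x100) % 0x40 = 0x25 and
+ * win_page = (0x1a5 - 0x100) / 0x40 = 2, so page 2 is written to the
+ * selector register and the access is redirected to
+ * window_start + 0x25.
+ */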
+
+static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
+					  unsigned long mask)
+{
+	u8 *buf;
+	int i;
+
+	if (!mask || !map->work_buf)
+		return;
+
+	buf = map->work_buf;
+
+	for (i = 0; i < max_bytes; i++)
+		buf[i] |= (mask >> (8 * i)) & 0xff;
+}
+
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+				  const void *val, size_t val_len)
+{
+	struct regmap_range_node *range;
+	unsigned long flags;
+	void *work_val = map->work_buf + map->format.reg_bytes +
+		map->format.pad_bytes;
+	void *buf;
+	int ret = -ENOTSUPP;
+	size_t len;
+	int i;
+
+	WARN_ON(!map->bus);
+
+	/* Check for unwritable registers before we start */
+	if (map->writeable_reg)
+		for (i = 0; i < val_len / map->format.val_bytes; i++)
+			if (!map->writeable_reg(map->dev,
+					       reg + regmap_get_offset(map, i)))
+				return -EINVAL;
+
+	if (!map->cache_bypass && map->format.parse_val) {
+		unsigned int ival;
+		int val_bytes = map->format.val_bytes;
+		for (i = 0; i < val_len / val_bytes; i++) {
+			ival = map->format.parse_val(val + (i * val_bytes));
+			ret = regcache_write(map,
+					     reg + regmap_get_offset(map, i),
+					     ival);
+			if (ret) {
+				dev_err(map->dev,
+					"Error in caching of register: %x ret: %d\n",
+					reg + i, ret);
+				return ret;
+			}
+		}
+		if (map->cache_only) {
+			map->cache_dirty = true;
+			return 0;
+		}
+	}
+
+	range = _regmap_range_lookup(map, reg);
+	if (range) {
+		int val_num = val_len / map->format.val_bytes;
+		int win_offset = (reg - range->range_min) % range->window_len;
+		int win_residue = range->window_len - win_offset;
+
+		/* If the write goes beyond the end of the window, split it */
+		while (val_num > win_residue) {
+			dev_dbg(map->dev, "Writing window %d/%zu\n",
+				win_residue, val_len / map->format.val_bytes);
+			ret = _regmap_raw_write_impl(map, reg, val,
+						     win_residue *
+						     map->format.val_bytes);
+			if (ret != 0)
+				return ret;
+
+			reg += win_residue;
+			val_num -= win_residue;
+			val += win_residue * map->format.val_bytes;
+			val_len -= win_residue * map->format.val_bytes;
+
+			win_offset = (reg - range->range_min) %
+				range->window_len;
+			win_residue = range->window_len - win_offset;
+		}
+
+		ret = _regmap_select_page(map, &reg, range, val_num);
+		if (ret != 0)
+			return ret;
+	}
+
+	map->format.format_reg(map->work_buf, reg, map->reg_shift);
+	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+				      map->write_flag_mask);
+
+	/*
+	 * Essentially all I/O mechanisms will be faster with a single
+	 * buffer to write.  Since register syncs often generate raw
+	 * writes of single registers, optimise that case.
+	 */
+	if (val != work_val && val_len == map->format.val_bytes) {
+		memcpy(work_val, val, map->format.val_bytes);
+		val = work_val;
+	}
+
+	if (map->async && map->bus->async_write) {
+		struct regmap_async *async;
+
+		trace_regmap_async_write_start(map, reg, val_len);
+
+		spin_lock_irqsave(&map->async_lock, flags);
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		if (async)
+			list_del(&async->list);
+		spin_unlock_irqrestore(&map->async_lock, flags);
+
+		if (!async) {
+			async = map->bus->async_alloc();
+			if (!async)
+				return -ENOMEM;
+
+			async->work_buf = kzalloc(map->format.buf_size,
+						  GFP_KERNEL | GFP_DMA);
+			if (!async->work_buf) {
+				kfree(async);
+				return -ENOMEM;
+			}
+		}
+
+		async->map = map;
+
+		/* If the caller supplied the value we can use it safely. */
+		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+		       map->format.reg_bytes + map->format.val_bytes);
+
+		spin_lock_irqsave(&map->async_lock, flags);
+		list_add_tail(&async->list, &map->async_list);
+		spin_unlock_irqrestore(&map->async_lock, flags);
+
+		if (val != work_val)
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes,
+						    val, val_len, async);
+		else
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes +
+						    val_len, NULL, 0, async);
+
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to schedule write: %d\n",
+				ret);
+
+			spin_lock_irqsave(&map->async_lock, flags);
+			list_move(&async->list, &map->async_free);
+			spin_unlock_irqrestore(&map->async_lock, flags);
+		}
+
+		return ret;
+	}
+
+	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
+
+	/* If we're doing a single register write we can probably just
+	 * send the work_buf directly, otherwise try to do a gather
+	 * write.
+	 */
+	if (val == work_val)
+		ret = map->bus->write(map->bus_context, map->work_buf,
+				      map->format.reg_bytes +
+				      map->format.pad_bytes +
+				      val_len);
+	else if (map->bus->gather_write)
+		ret = map->bus->gather_write(map->bus_context, map->work_buf,
+					     map->format.reg_bytes +
+					     map->format.pad_bytes,
+					     val, val_len);
+
+	/* If that didn't work, fall back on linearising by hand. */
+	if (ret == -ENOTSUPP) {
+		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
+		buf = kzalloc(len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		memcpy(buf, map->work_buf, map->format.reg_bytes);
+		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
+		       val, val_len);
+		ret = map->bus->write(map->bus_context, buf, len);
+
+		kfree(buf);
+	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+		/* regcache_drop_region() takes a lock that we already
+		 * hold, so call map->cache_ops->drop() directly
+		 */
+		if (map->cache_ops && map->cache_ops->drop)
+			map->cache_ops->drop(map, reg, reg + 1);
+	}
+
+	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
+
+	return ret;
+}
+
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+	return map->bus && map->bus->write && map->format.format_val &&
+		map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+	return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+	return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+				       unsigned int val)
+{
+	int ret;
+	struct regmap_range_node *range;
+	struct regmap *map = context;
+
+	WARN_ON(!map->bus || !map->format.format_write);
+
+	range = _regmap_range_lookup(map, reg);
+	if (range) {
+		ret = _regmap_select_page(map, &reg, range, 1);
+		if (ret != 0)
+			return ret;
+	}
+
+	map->format.format_write(map, reg, val);
+
+	trace_regmap_hw_write_start(map, reg, 1);
+
+	ret = map->bus->write(map->bus_context, map->work_buf,
+			      map->format.buf_size);
+
+	trace_regmap_hw_write_done(map, reg, 1);
+
+	return ret;
+}
+
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_write(map->bus_context, reg, val);
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+				 unsigned int val)
+{
+	struct regmap *map = context;
+
+	WARN_ON(!map->bus || !map->format.format_val);
+
+	map->format.format_val(map->work_buf + map->format.reg_bytes
+			       + map->format.pad_bytes, val, 0);
+	return _regmap_raw_write_impl(map, reg,
+				      map->work_buf +
+				      map->format.reg_bytes +
+				      map->format.pad_bytes,
+				      map->format.val_bytes);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+	return (map->bus) ? map : map->bus_context;
+}
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+		  unsigned int val)
+{
+	int ret;
+	void *context = _regmap_map_get_context(map);
+
+	if (!regmap_writeable(map, reg))
+		return -EIO;
+
+	if (!map->cache_bypass && !map->defer_caching) {
+		ret = regcache_write(map, reg, val);
+		if (ret != 0)
+			return ret;
+		if (map->cache_only) {
+			map->cache_dirty = true;
+			return 0;
+		}
+	}
+
+#ifdef LOG_DEVICE
+	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+		dev_info(map->dev, "%x <= %x\n", reg, val);
+#endif
+
+	trace_regmap_reg_write(map, reg, val);
+
+	return map->reg_write(context, reg, val);
+}
+
+/**
+ * regmap_write() - Write a value to a single register
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	ret = _regmap_write(map, reg, val);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write);
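+
+/*
+ * Illustrative sketch (not part of the original source): the simplest
+ * use of the API, writing a register of a hypothetical device and
+ * reading it back.
+ *
+ *	unsigned int val;
+ *	int ret;
+ *
+ *	ret = regmap_write(map, 0x04, 0xff);
+ *	if (ret)
+ *		return ret;
+ *	ret = regmap_read(map, 0x04, &val);
+ */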
+
+/**
+ * regmap_write_async() - Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_write(map, reg, val);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len)
+{
+	size_t val_bytes = map->format.val_bytes;
+	size_t val_count = val_len / val_bytes;
+	size_t chunk_count, chunk_bytes;
+	size_t chunk_regs = val_count;
+	int ret, i;
+
+	if (!val_count)
+		return -EINVAL;
+
+	if (map->use_single_write)
+		chunk_regs = 1;
+	else if (map->max_raw_write && val_len > map->max_raw_write)
+		chunk_regs = map->max_raw_write / val_bytes;
+
+	chunk_count = val_count / chunk_regs;
+	chunk_bytes = chunk_regs * val_bytes;
+
+	/* Write as many whole chunks of chunk_bytes as possible */
+	for (i = 0; i < chunk_count; i++) {
+		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+		if (ret)
+			return ret;
+
+		reg += regmap_get_offset(map, chunk_regs);
+		val += chunk_bytes;
+		val_len -= chunk_bytes;
+	}
+
+	/* Write remaining bytes */
+	if (val_len)
+		ret = _regmap_raw_write_impl(map, reg, val, val_len);
+
+	return ret;
+}
+
+/**
+ * regmap_raw_write() - Write raw values to one or more registers
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ *       device
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device.  No formatting will be done on the data provided.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+		     const void *val, size_t val_len)
+{
+	int ret;
+
+	if (!regmap_can_raw_write(map))
+		return -EINVAL;
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	ret = _regmap_raw_write(map, reg, val, val_len);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write);
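+
+/*
+ * Illustrative sketch (not part of the original source): pushing a
+ * block of pre-formatted data with regmap_raw_write().  The data must
+ * already be in the device's wire format, assumed here to be
+ * big-endian 16-bit values.
+ *
+ *	__be16 fw[2] = { cpu_to_be16(0x1234), cpu_to_be16(0x5678) };
+ *
+ *	ret = regmap_raw_write(map, 0x100, fw, sizeof(fw));
+ */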
+
+/**
+ * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
+ *                                   register field.
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to write asynchronously
+ * @force: Boolean indicating whether to force the write
+ *
+ * Perform a read/modify/write cycle on the register field with the
+ * change, async and force options.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits_base(struct regmap_field *field,
+				  unsigned int mask, unsigned int val,
+				  bool *change, bool async, bool force)
+{
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits_base(field->regmap, field->reg,
+				       mask, val << field->shift,
+				       change, async, force);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
+
+/**
+ * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
+ *                                    register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to write asynchronously
+ * @force: Boolean indicating whether to force the write
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
+				   unsigned int mask, unsigned int val,
+				   bool *change, bool async, bool force)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits_base(field->regmap,
+				       field->reg + (field->id_offset * id),
+				       mask, val << field->shift,
+				       change, async, force);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
+
+/**
+ * regmap_bulk_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @reg: First register to be written to
+ * @val: Block of data to be written, in native register size for device
+ * @val_count: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of
+ * data to the device either in a single transfer or in multiple transfers.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+		     size_t val_count)
+{
+	int ret = 0, i;
+	size_t val_bytes = map->format.val_bytes;
+
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+
+	/*
+	 * Some devices don't support bulk write; for them we have a series of
+	 * single write operations.
+	 */
+	if (!map->bus || !map->format.parse_inplace) {
+		map->lock(map->lock_arg);
+		for (i = 0; i < val_count; i++) {
+			unsigned int ival;
+
+			switch (val_bytes) {
+			case 1:
+				ival = *(u8 *)(val + (i * val_bytes));
+				break;
+			case 2:
+				ival = *(u16 *)(val + (i * val_bytes));
+				break;
+			case 4:
+				ival = *(u32 *)(val + (i * val_bytes));
+				break;
+#ifdef CONFIG_64BIT
+			case 8:
+				ival = *(u64 *)(val + (i * val_bytes));
+				break;
+#endif
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = _regmap_write(map,
+					    reg + regmap_get_offset(map, i),
+					    ival);
+			if (ret != 0)
+				goto out;
+		}
+out:
+		map->unlock(map->lock_arg);
+	} else {
+		void *wval;
+
+		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
+		if (!wval)
+			return -ENOMEM;
+
+		for (i = 0; i < val_count * val_bytes; i += val_bytes)
+			map->format.parse_inplace(wval + i);
+
+		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
+
+		kfree(wval);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
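+
+/*
+ * Illustrative sketch (not part of the original source): unlike
+ * regmap_raw_write(), regmap_bulk_write() takes values in native CPU
+ * format and converts a copy for the bus.  This assumes a device with
+ * 16-bit registers; the addresses and values are hypothetical.
+ *
+ *	u16 vals[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
+ *
+ *	ret = regmap_bulk_write(map, 0x20, vals, ARRAY_SIZE(vals));
+ */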
+
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * The (register, newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been changed to be page
+ * relative. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+				       const struct reg_sequence *regs,
+				       size_t num_regs)
+{
+	int ret;
+	void *buf;
+	int i;
+	u8 *u8;
+	size_t val_bytes = map->format.val_bytes;
+	size_t reg_bytes = map->format.reg_bytes;
+	size_t pad_bytes = map->format.pad_bytes;
+	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+	size_t len = pair_size * num_regs;
+
+	if (!len)
+		return -EINVAL;
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* We have to linearise by hand. */
+
+	u8 = buf;
+
+	for (i = 0; i < num_regs; i++) {
+		unsigned int reg = regs[i].reg;
+		unsigned int val = regs[i].def;
+		trace_regmap_hw_write_start(map, reg, 1);
+		map->format.format_reg(u8, reg, map->reg_shift);
+		u8 += reg_bytes + pad_bytes;
+		map->format.format_val(u8, val, 0);
+		u8 += val_bytes;
+	}
+	u8 = buf;
+	*u8 |= map->write_flag_mask;
+
+	ret = map->bus->write(map->bus_context, buf, len);
+
+	kfree(buf);
+
+	for (i = 0; i < num_regs; i++) {
+		int reg = regs[i].reg;
+		trace_regmap_hw_write_done(map, reg, 1);
+	}
+	return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+					  unsigned int reg,
+					  struct regmap_range_node *range)
+{
+	unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+	return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+					       struct reg_sequence *regs,
+					       size_t num_regs)
+{
+	int ret;
+	int i, n;
+	struct reg_sequence *base;
+	unsigned int this_page = 0;
+	unsigned int page_change = 0;
+	/*
+	 * the set of registers is not necessarily in order, but
+	 * since the order of write must be preserved this algorithm
+	 * chops the set each time the page changes. This also applies
+	 * if there is a delay required at any point in the sequence.
+	 */
+	base = regs;
+	for (i = 0, n = 0; i < num_regs; i++, n++) {
+		unsigned int reg = regs[i].reg;
+		struct regmap_range_node *range;
+
+		range = _regmap_range_lookup(map, reg);
+		if (range) {
+			unsigned int win_page = _regmap_register_page(map, reg,
+								      range);
+
+			if (i == 0)
+				this_page = win_page;
+			if (win_page != this_page) {
+				this_page = win_page;
+				page_change = 1;
+			}
+		}
+
+		/* If we have both a page change and a delay make sure to
+		 * write the regs and apply the delay before we change the
+		 * page.
+		 */
+
+		if (page_change || regs[i].delay_us) {
+
+				/* For situations where the first write requires
+				 * a delay, we need to make sure we don't call
+				 * raw_multi_reg_write with n=0
+				 * This can't occur with page breaks as we
+				 * never write on the first iteration
+				 */
+				if (regs[i].delay_us && i == 0)
+					n = 1;
+
+				ret = _regmap_raw_multi_reg_write(map, base, n);
+				if (ret != 0)
+					return ret;
+
+				if (regs[i].delay_us)
+					udelay(regs[i].delay_us);
+
+				base += n;
+				n = 0;
+
+				if (page_change) {
+					ret = _regmap_select_page(map,
+								  &base[n].reg,
+								  range, 1);
+					if (ret != 0)
+						return ret;
+
+					page_change = 0;
+				}
+
+		}
+
+	}
+	if (n > 0)
+		return _regmap_raw_multi_reg_write(map, base, n);
+	return 0;
+}
+
+static int _regmap_multi_reg_write(struct regmap *map,
+				   const struct reg_sequence *regs,
+				   size_t num_regs)
+{
+	int i;
+	int ret;
+
+	if (!map->can_multi_write) {
+		for (i = 0; i < num_regs; i++) {
+			ret = _regmap_write(map, regs[i].reg, regs[i].def);
+			if (ret != 0)
+				return ret;
+
+			if (regs[i].delay_us)
+				udelay(regs[i].delay_us);
+		}
+		return 0;
+	}
+
+	if (!map->format.parse_inplace)
+		return -EINVAL;
+
+	if (map->writeable_reg)
+		for (i = 0; i < num_regs; i++) {
+			int reg = regs[i].reg;
+			if (!map->writeable_reg(map->dev, reg))
+				return -EINVAL;
+			if (!IS_ALIGNED(reg, map->reg_stride))
+				return -EINVAL;
+		}
+
+	if (!map->cache_bypass) {
+		for (i = 0; i < num_regs; i++) {
+			unsigned int val = regs[i].def;
+			unsigned int reg = regs[i].reg;
+			ret = regcache_write(map, reg, val);
+			if (ret) {
+				dev_err(map->dev,
+				"Error in caching of register: %x ret: %d\n",
+								reg, ret);
+				return ret;
+			}
+		}
+		if (map->cache_only) {
+			map->cache_dirty = true;
+			return 0;
+		}
+	}
+
+	WARN_ON(!map->bus);
+
+	for (i = 0; i < num_regs; i++) {
+		unsigned int reg = regs[i].reg;
+		struct regmap_range_node *range;
+
+		/* Coalesce all the writes between a page break or a delay
+		 * in a sequence
+		 */
+		range = _regmap_range_lookup(map, reg);
+		if (range || regs[i].delay_us) {
+			size_t len = sizeof(struct reg_sequence)*num_regs;
+			struct reg_sequence *base = kmemdup(regs, len,
+							   GFP_KERNEL);
+			if (!base)
+				return -ENOMEM;
+			ret = _regmap_range_multi_paged_reg_write(map, base,
+								  num_regs);
+			kfree(base);
+
+			return ret;
+		}
+	}
+	return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/**
+ * regmap_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
+			   int num_regs)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
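+
+/*
+ * Illustrative sketch (not part of the original source): writing a
+ * scattered, hypothetical init sequence where the first write must be
+ * followed by a delay before the rest is sent.
+ *
+ *	static const struct reg_sequence init_seq[] = {
+ *		{ .reg = 0x00, .def = 0x01, .delay_us = 100 },
+ *		{ .reg = 0x10, .def = 0x80 },
+ *		{ .reg = 0x05, .def = 0x3c },
+ *	};
+ *
+ *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
+ */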
+
+/**
+ * regmap_multi_reg_write_bypassed() - Write multiple registers to the
+ *                                     device but not the cache
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * Write multiple registers to the device but not the cache, where the
+ * set of registers is supplied in any order.
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in a single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+				    const struct reg_sequence *regs,
+				    int num_regs)
+{
+	int ret;
+	bool bypass;
+
+	map->lock(map->lock_arg);
+
+	bypass = map->cache_bypass;
+	map->cache_bypass = true;
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+	map->cache_bypass = bypass;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
+
+/**
+ * regmap_raw_write_async() - Write raw values to one or more registers
+ *                            asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ *       device.  Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device.  No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher speed buses
+ * like SPI.  regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+			   const void *val, size_t val_len)
+{
+	int ret;
+
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_raw_write(map, reg, val, val_len);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
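+
+/*
+ * Illustrative sketch (not part of the original source): scheduling an
+ * asynchronous download and then waiting for completion.  fw_buf and
+ * fw_len are hypothetical; the buffer must remain valid until
+ * regmap_async_complete() returns.
+ *
+ *	ret = regmap_raw_write_async(map, 0x100, fw_buf, fw_len);
+ *	if (ret)
+ *		return ret;
+ *	return regmap_async_complete(map);
+ */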
+
+static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+			    unsigned int val_len)
+{
+	struct regmap_range_node *range;
+	int ret;
+
+	WARN_ON(!map->bus);
+
+	if (!map->bus || !map->bus->read)
+		return -EINVAL;
+
+	range = _regmap_range_lookup(map, reg);
+	if (range) {
+		ret = _regmap_select_page(map, &reg, range,
+					  val_len / map->format.val_bytes);
+		if (ret != 0)
+			return ret;
+	}
+
+	map->format.format_reg(map->work_buf, reg, map->reg_shift);
+	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+				      map->read_flag_mask);
+	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
+
+	ret = map->bus->read(map->bus_context, map->work_buf,
+			     map->format.reg_bytes + map->format.pad_bytes,
+			     val, val_len);
+
+	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
+
+	return ret;
+}
+
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_read(map->bus_context, reg, val);
+}
+
+static int _regmap_bus_read(void *context, unsigned int reg,
+			    unsigned int *val)
+{
+	int ret;
+	struct regmap *map = context;
+	void *work_val = map->work_buf + map->format.reg_bytes +
+		map->format.pad_bytes;
+
+	if (!map->format.parse_val)
+		return -EINVAL;
+
+	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+	if (ret == 0)
+		*val = map->format.parse_val(work_val);
+
+	return ret;
+}
+
+static int _regmap_read(struct regmap *map, unsigned int reg,
+			unsigned int *val)
+{
+	int ret;
+	void *context = _regmap_map_get_context(map);
+
+	if (!map->cache_bypass) {
+		ret = regcache_read(map, reg, val);
+		if (ret == 0)
+			return 0;
+	}
+
+	if (map->cache_only)
+		return -EBUSY;
+
+	if (!regmap_readable(map, reg))
+		return -EIO;
+
+	ret = map->reg_read(context, reg, val);
+	if (ret == 0) {
+#ifdef LOG_DEVICE
+		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+			dev_info(map->dev, "%x => %x\n", reg, *val);
+#endif
+
+		trace_regmap_reg_read(map, reg, *val);
+
+		if (!map->cache_bypass)
+			regcache_write(map, reg, *val);
+	}
+
+	return ret;
+}
+
+/**
+ * regmap_read() - Read a value from a single register
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+	int ret;
+
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	ret = _regmap_read(map, reg, val);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read);
+
+/**
+ * regmap_raw_read() - Read raw data from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value
+ * @val_len: Size of data to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+		    size_t val_len)
+{
+	size_t val_bytes = map->format.val_bytes;
+	size_t val_count = val_len / val_bytes;
+	unsigned int v;
+	int ret, i;
+
+	if (!map->bus)
+		return -EINVAL;
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+	if (val_count == 0)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
+	    map->cache_type == REGCACHE_NONE) {
+		size_t chunk_count, chunk_bytes;
+		size_t chunk_regs = val_count;
+
+		if (!map->bus->read) {
+			ret = -ENOTSUPP;
+			goto out;
+		}
+
+		if (map->use_single_read)
+			chunk_regs = 1;
+		else if (map->max_raw_read && val_len > map->max_raw_read)
+			chunk_regs = map->max_raw_read / val_bytes;
+
+		chunk_count = val_count / chunk_regs;
+		chunk_bytes = chunk_regs * val_bytes;
+
+		/* Read bytes that fit into whole chunks */
+		for (i = 0; i < chunk_count; i++) {
+			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+			if (ret != 0)
+				goto out;
+
+			reg += regmap_get_offset(map, chunk_regs);
+			val += chunk_bytes;
+			val_len -= chunk_bytes;
+		}
+
+		/* Read remaining bytes */
+		if (val_len) {
+			ret = _regmap_raw_read(map, reg, val, val_len);
+			if (ret != 0)
+				goto out;
+		}
+	} else {
+		/* Otherwise go word by word for the cache; should be low
+		 * cost as we expect to hit the cache.
+		 */
+		for (i = 0; i < val_count; i++) {
+			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+					   &v);
+			if (ret != 0)
+				goto out;
+
+			map->format.format_val(val + (i * val_bytes), v, 0);
+		}
+	}
+
+ out:
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_read);
+
+/**
+ * regmap_noinc_read() - Read data from a register without incrementing the
+ *			register number
+ *
+ * @map: Register map to read from
+ * @reg: Register to read from
+ * @val: Pointer to data buffer
+ * @val_len: Length of output buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus read operations will read a
+ * range of registers. Some devices have certain registers for which a read
+ * operation will instead read from an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple reads as required to read val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+		      void *val, size_t val_len)
+{
+	size_t read_len;
+	int ret;
+
+	if (!map->bus)
+		return -EINVAL;
+	if (!map->bus->read)
+		return -ENOTSUPP;
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+	if (val_len == 0)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	while (val_len) {
+		if (map->max_raw_read && map->max_raw_read < val_len)
+			read_len = map->max_raw_read;
+		else
+			read_len = val_len;
+		ret = _regmap_raw_read(map, reg, val, read_len);
+		if (ret)
+			goto out_unlock;
+		val = ((u8 *)val) + read_len;
+		val_len -= read_len;
+	}
+
+out_unlock:
+	map->unlock(map->lock_arg);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
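+
+/*
+ * Illustrative sketch (not part of the original source): draining a
+ * hypothetical 16-byte FIFO exposed through a single register.
+ * FIFO_REG is a made-up name; the register must be marked volatile
+ * and noinc-readable via the regmap_config callbacks.
+ *
+ *	u8 fifo[16];
+ *
+ *	ret = regmap_noinc_read(map, FIFO_REG, fifo, sizeof(fifo));
+ */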
+
+/**
+ * regmap_field_read() - Read a value from a single register field
+ *
+ * @field: Register field to read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_read(struct regmap_field *field, unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+	ret = regmap_read(field->regmap, field->reg, &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_field_read);
+
+/**
+ * regmap_fields_read() - Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+		       unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	ret = regmap_read(field->regmap,
+			  field->reg + (field->id_offset * id),
+			  &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
+ * regmap_bulk_read() - Read multiple registers from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+		     size_t val_count)
+{
+	int ret, i;
+	size_t val_bytes = map->format.val_bytes;
+	bool vol = regmap_volatile_range(map, reg, val_count);
+
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+	if (val_count == 0)
+		return -EINVAL;
+
+	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+		if (ret != 0)
+			return ret;
+
+		for (i = 0; i < val_count * val_bytes; i += val_bytes)
+			map->format.parse_inplace(val + i);
+	} else {
+#ifdef CONFIG_64BIT
+		u64 *u64 = val;
+#endif
+		u32 *u32 = val;
+		u16 *u16 = val;
+		u8 *u8 = val;
+
+		map->lock(map->lock_arg);
+
+		for (i = 0; i < val_count; i++) {
+			unsigned int ival;
+
+			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+					   &ival);
+			if (ret != 0)
+				goto out;
+
+			switch (map->format.val_bytes) {
+#ifdef CONFIG_64BIT
+			case 8:
+				u64[i] = ival;
+				break;
+#endif
+			case 4:
+				u32[i] = ival;
+				break;
+			case 2:
+				u16[i] = ival;
+				break;
+			case 1:
+				u8[i] = ival;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+
+out:
+		map->unlock(map->lock_arg);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_read);
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+			       unsigned int mask, unsigned int val,
+			       bool *change, bool force_write)
+{
+	int ret;
+	unsigned int tmp, orig;
+
+	if (change)
+		*change = false;
+
+	if (regmap_volatile(map, reg) && map->reg_update_bits) {
+		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+		if (ret == 0 && change)
+			*change = true;
+	} else {
+		ret = _regmap_read(map, reg, &orig);
+		if (ret != 0)
+			return ret;
+
+		tmp = orig & ~mask;
+		tmp |= val & mask;
+
+		if (force_write || (tmp != orig)) {
+			ret = _regmap_write(map, reg, tmp);
+			if (ret == 0 && change)
+				*change = true;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to write asynchronously
+ * @force: Boolean indicating whether to force the write
+ *
+ * Perform a read/modify/write cycle on a register map with the change,
+ * async and force options.
+ *
+ * If async is true:
+ *
+ * With most buses the read must be done synchronously so this is most useful
+ * for devices with a cache which do not need to interact with the hardware to
+ * determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+			    unsigned int mask, unsigned int val,
+			    bool *change, bool async, bool force)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = async;
+
+	ret = _regmap_update_bits(map, reg, mask, val, change, force);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_base);
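+
+/*
+ * Illustrative sketch (not part of the original source): drivers
+ * normally use the regmap_update_bits() wrapper macro rather than
+ * calling the _base variant directly.  The register and bit below are
+ * hypothetical.
+ *
+ *	ret = regmap_update_bits(map, 0x04, BIT(3), BIT(3));
+ */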
+
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+	struct regmap *map = async->map;
+	bool wake;
+
+	trace_regmap_async_io_complete(map);
+
+	spin_lock(&map->async_lock);
+	list_move(&async->list, &map->async_free);
+	wake = list_empty(&map->async_list);
+
+	if (ret != 0)
+		map->async_ret = ret;
+
+	spin_unlock(&map->async_lock);
+
+	if (wake)
+		wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
+static int regmap_async_is_done(struct regmap *map)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&map->async_lock, flags);
+	ret = list_empty(&map->async_list);
+	spin_unlock_irqrestore(&map->async_lock, flags);
+
+	return ret;
+}
+
+/**
+ * regmap_async_complete - Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed.  Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+	unsigned long flags;
+	int ret;
+
+	/* Nothing to do if the bus has no async support */
+	if (!map->bus || !map->bus->async_write)
+		return 0;
+
+	trace_regmap_async_complete_start(map);
+
+	wait_event(map->async_waitq, regmap_async_is_done(map));
+
+	spin_lock_irqsave(&map->async_lock, flags);
+	ret = map->async_ret;
+	map->async_ret = 0;
+	spin_unlock_irqrestore(&map->async_lock, flags);
+
+	trace_regmap_async_complete_done(map);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
+/**
+ * regmap_register_patch - Register and apply register updates to be applied
+ *                         on device initialisation
+ *
+ * @map: Register map to apply updates to.
+ * @regs: Values to update.
+ * @num_regs: Number of entries in regs.
+ *
+ * Register a set of register updates to be applied to the device
+ * whenever the device registers are synchronised with the cache and
+ * apply them immediately.  Typically this is used to apply
+ * corrections to the device defaults on startup, such as the updates
+ * some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
+ */
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
+			  int num_regs)
+{
+	struct reg_sequence *p;
+	int ret;
+	bool bypass;
+
+	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
+	    num_regs))
+		return 0;
+
+	p = krealloc(map->patch,
+		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
+		     GFP_KERNEL);
+	if (p) {
+		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
+		map->patch = p;
+		map->patch_regs += num_regs;
+	} else {
+		return -ENOMEM;
+	}
+
+	map->lock(map->lock_arg);
+
+	bypass = map->cache_bypass;
+
+	map->cache_bypass = true;
+	map->async = true;
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+	map->async = false;
+	map->cache_bypass = bypass;
+
+	map->unlock(map->lock_arg);
+
+	regmap_async_complete(map);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_register_patch);
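+
+/*
+ * Illustrative sketch (not part of the original source): registering a
+ * hypothetical vendor errata patch so that it is reapplied on every
+ * cache sync as well as immediately.
+ *
+ *	static const struct reg_sequence errata[] = {
+ *		{ 0x71, 0x0004 },
+ *		{ 0x72, 0x0018 },
+ *	};
+ *
+ *	ret = regmap_register_patch(map, errata, ARRAY_SIZE(errata));
+ */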
+
+/**
+ * regmap_get_val_bytes() - Report the size of a register value
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the size of a register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_val_bytes(struct regmap *map)
+{
+	if (map->format.format_write)
+		return -EINVAL;
+
+	return map->format.val_bytes;
+}
+EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+
+/**
+ * regmap_get_max_register() - Report the max register value
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the max register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_max_register(struct regmap *map)
+{
+	return map->max_register ? map->max_register : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regmap_get_max_register);
+
+/**
+ * regmap_get_reg_stride() - Report the register address stride
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the register address stride, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_reg_stride(struct regmap *map)
+{
+	return map->reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
+
+int regmap_parse_val(struct regmap *map, const void *buf,
+			unsigned int *val)
+{
+	if (!map->format.parse_val)
+		return -EINVAL;
+
+	*val = map->format.parse_val(buf);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
+static int __init regmap_initcall(void)
+{
+	regmap_debugfs_initcall();
+
+	return 0;
+}
+postcore_initcall(regmap_initcall);
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
new file mode 100644
index 0000000..d4066fa
--- /dev/null
+++ b/drivers/base/regmap/trace.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#include "internal.h"
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+	TP_PROTO(struct regmap *map, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(map, reg, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		regmap_name(map)	)
+		__field(	unsigned int,	reg			)
+		__field(	unsigned int,	val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+		__entry->reg = reg;
+		__entry->val = val;
+	),
+
+	TP_printk("%s reg=%x val=%x", __get_str(name),
+		  (unsigned int)__entry->reg,
+		  (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+	TP_PROTO(struct regmap *map, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(map, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+	TP_PROTO(struct regmap *map, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(map, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+	TP_PROTO(struct regmap *map, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(map, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(regmap_block,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count),
+
+	TP_STRUCT__entry(
+		__string(	name,		regmap_name(map)	)
+		__field(	unsigned int,	reg			)
+		__field(	int,		count			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+		__entry->reg = reg;
+		__entry->count = count;
+	),
+
+	TP_printk("%s reg=%x count=%d", __get_str(name),
+		  (unsigned int)__entry->reg,
+		  (int)__entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count)
+);
+
+TRACE_EVENT(regcache_sync,
+
+	TP_PROTO(struct regmap *map, const char *type,
+		 const char *status),
+
+	TP_ARGS(map, type, status),
+
+	TP_STRUCT__entry(
+		__string(       name,           regmap_name(map)	)
+		__string(	status,		status			)
+		__string(	type,		type			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+		__assign_str(status, status);
+		__assign_str(type, type);
+	),
+
+	TP_printk("%s type=%s status=%s", __get_str(name),
+		  __get_str(type), __get_str(status))
+);
+
+DECLARE_EVENT_CLASS(regmap_bool,
+
+	TP_PROTO(struct regmap *map, bool flag),
+
+	TP_ARGS(map, flag),
+
+	TP_STRUCT__entry(
+		__string(	name,		regmap_name(map)	)
+		__field(	int,		flag			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+		__entry->flag = flag;
+	),
+
+	TP_printk("%s flag=%d", __get_str(name),
+		  (int)__entry->flag)
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_only,
+
+	TP_PROTO(struct regmap *map, bool flag),
+
+	TP_ARGS(map, flag)
+
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+
+	TP_PROTO(struct regmap *map, bool flag),
+
+	TP_ARGS(map, flag)
+
+);
+
+DECLARE_EVENT_CLASS(regmap_async,
+
+	TP_PROTO(struct regmap *map),
+
+	TP_ARGS(map),
+
+	TP_STRUCT__entry(
+		__string(	name,		regmap_name(map)	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+	TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+	TP_PROTO(struct regmap *map),
+
+	TP_ARGS(map)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+	TP_PROTO(struct regmap *map),
+
+	TP_ARGS(map)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+	TP_PROTO(struct regmap *map),
+
+	TP_ARGS(map)
+
+);
+
+TRACE_EVENT(regcache_drop_region,
+
+	TP_PROTO(struct regmap *map, unsigned int from,
+		 unsigned int to),
+
+	TP_ARGS(map, from, to),
+
+	TP_STRUCT__entry(
+		__string(       name,           regmap_name(map)	)
+		__field(	unsigned int,	from			)
+		__field(	unsigned int,	to			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, regmap_name(map));
+		__entry->from = from;
+		__entry->to = to;
+	),
+
+	TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
+		  (unsigned int)__entry->to)
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 0000000..10b280f
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+#include <linux/glob.h>
+
+static DEFINE_IDA(soc_ida);
+
+static ssize_t soc_info_get(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf);
+
+struct soc_device {
+	struct device dev;
+	struct soc_device_attribute *attr;
+	int soc_dev_num;
+};
+
+static struct bus_type soc_bus_type = {
+	.name  = "soc",
+};
+
+static DEVICE_ATTR(machine,  S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(family,   S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(soc_id,   S_IRUGO, soc_info_get,  NULL);
+static DEVICE_ATTR(revision, S_IRUGO, soc_info_get,  NULL);
+
+struct device *soc_device_to_device(struct soc_device *soc_dev)
+{
+	return &soc_dev->dev;
+}
+
+static umode_t soc_attribute_mode(struct kobject *kobj,
+				struct attribute *attr,
+				int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+	if ((attr == &dev_attr_machine.attr)
+	    && (soc_dev->attr->machine != NULL))
+		return attr->mode;
+	if ((attr == &dev_attr_family.attr)
+	    && (soc_dev->attr->family != NULL))
+		return attr->mode;
+	if ((attr == &dev_attr_revision.attr)
+	    && (soc_dev->attr->revision != NULL))
+		return attr->mode;
+	if ((attr == &dev_attr_soc_id.attr)
+	    && (soc_dev->attr->soc_id != NULL))
+		return attr->mode;
+
+	/* Unknown or unfilled attribute. */
+	return 0;
+}
+
+static ssize_t soc_info_get(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+	if (attr == &dev_attr_machine)
+		return sprintf(buf, "%s\n", soc_dev->attr->machine);
+	if (attr == &dev_attr_family)
+		return sprintf(buf, "%s\n", soc_dev->attr->family);
+	if (attr == &dev_attr_revision)
+		return sprintf(buf, "%s\n", soc_dev->attr->revision);
+	if (attr == &dev_attr_soc_id)
+		return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
+
+	return -EINVAL;
+
+}
+
+static struct attribute *soc_attr[] = {
+	&dev_attr_machine.attr,
+	&dev_attr_family.attr,
+	&dev_attr_soc_id.attr,
+	&dev_attr_revision.attr,
+	NULL,
+};
+
+static const struct attribute_group soc_attr_group = {
+	.attrs = soc_attr,
+	.is_visible = soc_attribute_mode,
+};
+
+static const struct attribute_group *soc_attr_groups[] = {
+	&soc_attr_group,
+	NULL,
+};
+
+static void soc_release(struct device *dev)
+{
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+	kfree(soc_dev);
+}
+
+static struct soc_device_attribute *early_soc_dev_attr;
+
+struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
+{
+	struct soc_device *soc_dev;
+	int ret;
+
+	if (!soc_bus_type.p) {
+		if (early_soc_dev_attr)
+			return ERR_PTR(-EBUSY);
+		early_soc_dev_attr = soc_dev_attr;
+		return NULL;
+	}
+
+	soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
+	if (!soc_dev) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	/* Fetch a unique (reclaimable) SOC ID. */
+	ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto out2;
+	soc_dev->soc_dev_num = ret;
+
+	soc_dev->attr = soc_dev_attr;
+	soc_dev->dev.bus = &soc_bus_type;
+	soc_dev->dev.groups = soc_attr_groups;
+	soc_dev->dev.release = soc_release;
+
+	dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
+
+	ret = device_register(&soc_dev->dev);
+	if (ret)
+		goto out3;
+
+	return soc_dev;
+
+out3:
+	ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
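+	/* put_device() ends in soc_release(), which frees soc_dev */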
+	put_device(&soc_dev->dev);
+	soc_dev = NULL;
+out2:
+	kfree(soc_dev);
+out1:
+	return ERR_PTR(ret);
+}
+
+/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+void soc_device_unregister(struct soc_device *soc_dev)
+{
+	ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+
+	device_unregister(&soc_dev->dev);
+	early_soc_dev_attr = NULL;
+}
+
+static int __init soc_bus_register(void)
+{
+	int ret;
+
+	ret = bus_register(&soc_bus_type);
+	if (ret)
+		return ret;
+
+	if (early_soc_dev_attr)
+		return PTR_ERR_OR_ZERO(soc_device_register(early_soc_dev_attr));
+
+	return 0;
+}
+core_initcall(soc_bus_register);
+
+static int soc_device_match_attr(const struct soc_device_attribute *attr,
+				 const struct soc_device_attribute *match)
+{
+	if (match->machine &&
+	    (!attr->machine || !glob_match(match->machine, attr->machine)))
+		return 0;
+
+	if (match->family &&
+	    (!attr->family || !glob_match(match->family, attr->family)))
+		return 0;
+
+	if (match->revision &&
+	    (!attr->revision || !glob_match(match->revision, attr->revision)))
+		return 0;
+
+	if (match->soc_id &&
+	    (!attr->soc_id || !glob_match(match->soc_id, attr->soc_id)))
+		return 0;
+
+	return 1;
+}
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+	return soc_device_match_attr(soc_dev->attr, arg);
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+	const struct soc_device_attribute *matches)
+{
+	int ret = 0;
+
+	if (!matches)
+		return NULL;
+
+	while (!ret) {
+		if (!(matches->machine || matches->family ||
+		      matches->revision || matches->soc_id))
+			break;
+		ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+				       soc_device_match_one);
+		if (ret < 0 && early_soc_dev_attr)
+			ret = soc_device_match_attr(early_soc_dev_attr,
+						    matches);
+		if (ret < 0)
+			return NULL;
+		if (!ret)
+			matches++;
+		else
+			return matches;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
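A minimal usage sketch of the API above (illustrative only; the acme_* names
and strings are placeholders, not from the tree): platform code publishes the
SoC identity with soc_device_register(), and a driver keys a quirk off it with
soc_device_match(). Because matching goes through glob_match(), match-table
entries may use glob patterns.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static int __init acme_soc_init(void)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->machine  = "Acme Eval Board";
	attr->family   = "Acme SoC";
	attr->soc_id   = "acme1000";
	attr->revision = "1.0";

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		kfree(attr);
		return PTR_ERR(soc_dev);
	}
	return 0;
}
core_initcall(acme_soc_init);

/* Elsewhere, a driver applies a revision-specific quirk: */
static const struct soc_device_attribute acme_quirks[] = {
	{ .family = "Acme SoC", .revision = "1.*" },
	{ /* sentinel */ }
};

static bool acme_needs_quirk(void)
{
	return soc_device_match(acme_quirks) != NULL;
}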
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
new file mode 100644
index 0000000..6e076f3
--- /dev/null
+++ b/drivers/base/syscore.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  syscore.c - Execution of system core operations.
+ *
+ *  Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+
+#include <linux/syscore_ops.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <trace/events/power.h>
+
+static LIST_HEAD(syscore_ops_list);
+static DEFINE_MUTEX(syscore_ops_lock);
+
+/**
+ * register_syscore_ops - Register a set of system core operations.
+ * @ops: System core operations to register.
+ */
+void register_syscore_ops(struct syscore_ops *ops)
+{
+	mutex_lock(&syscore_ops_lock);
+	list_add_tail(&ops->node, &syscore_ops_list);
+	mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(register_syscore_ops);
+
+/**
+ * unregister_syscore_ops - Unregister a set of system core operations.
+ * @ops: System core operations to unregister.
+ */
+void unregister_syscore_ops(struct syscore_ops *ops)
+{
+	mutex_lock(&syscore_ops_lock);
+	list_del(&ops->node);
+	mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * syscore_suspend - Execute all the registered system core suspend callbacks.
+ *
+ * This function is executed with only one CPU online and interrupts disabled.
+ */
+int syscore_suspend(void)
+{
+	struct syscore_ops *ops;
+	int ret = 0;
+
+	trace_suspend_resume(TPS("syscore_suspend"), 0, true);
+	pr_debug("Checking wakeup interrupts\n");
+
+	/* Return error code if there are any wakeup interrupts pending. */
+	if (pm_wakeup_pending())
+		return -EBUSY;
+
+	WARN_ONCE(!irqs_disabled(),
+		"Interrupts enabled before system core suspend.\n");
+
+	list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+		if (ops->suspend) {
+			if (initcall_debug)
+				pr_info("PM: Calling %pF\n", ops->suspend);
+			ret = ops->suspend();
+			if (ret)
+				goto err_out;
+			WARN_ONCE(!irqs_disabled(),
+				"Interrupts enabled after %pF\n", ops->suspend);
+		}
+
+	trace_suspend_resume(TPS("syscore_suspend"), 0, false);
+	return 0;
+
+ err_out:
+	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
+
+	list_for_each_entry_continue(ops, &syscore_ops_list, node)
+		if (ops->resume)
+			ops->resume();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(syscore_suspend);
+
+/**
+ * syscore_resume - Execute all the registered system core resume callbacks.
+ *
+ * This function is executed with only one CPU online and interrupts disabled.
+ */
+void syscore_resume(void)
+{
+	struct syscore_ops *ops;
+
+	trace_suspend_resume(TPS("syscore_resume"), 0, true);
+	WARN_ONCE(!irqs_disabled(),
+		"Interrupts enabled before system core resume.\n");
+
+	list_for_each_entry(ops, &syscore_ops_list, node)
+		if (ops->resume) {
+			if (initcall_debug)
+				pr_info("PM: Calling %pF\n", ops->resume);
+			ops->resume();
+			WARN_ONCE(!irqs_disabled(),
+				"Interrupts enabled after %pF\n", ops->resume);
+		}
+	trace_suspend_resume(TPS("syscore_resume"), 0, false);
+}
+EXPORT_SYMBOL_GPL(syscore_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * syscore_shutdown - Execute all the registered system core shutdown callbacks.
+ */
+void syscore_shutdown(void)
+{
+	struct syscore_ops *ops;
+
+	mutex_lock(&syscore_ops_lock);
+
+	list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+		if (ops->shutdown) {
+			if (initcall_debug)
+				pr_info("PM: Calling %pF\n", ops->shutdown);
+			ops->shutdown();
+		}
+
+	mutex_unlock(&syscore_ops_lock);
+}
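A minimal sketch of a syscore_ops user (illustrative only; the acme_* names
are placeholders, not from the tree). The suspend/resume callbacks run with a
single CPU online and interrupts disabled, so they must not sleep:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int acme_timer_suspend(void)
{
	/* Save minimal hardware state; runs atomically, must not sleep. */
	return 0;	/* nonzero aborts suspend and resumes already-suspended ops */
}

static void acme_timer_resume(void)
{
	/* Restore the state saved in acme_timer_suspend(). */
}

static struct syscore_ops acme_timer_syscore_ops = {
	.suspend = acme_timer_suspend,
	.resume  = acme_timer_resume,
};

static int __init acme_timer_init(void)
{
	register_syscore_ops(&acme_timer_syscore_ops);
	return 0;
}
device_initcall(acme_timer_init);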
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 0000000..86e85da
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+config TEST_ASYNC_DRIVER_PROBE
+	tristate "Build kernel module to test asynchronous driver probing"
+	depends on m
+	help
+	  Enabling this option produces a kernel module that allows
+	  testing asynchronous driver probing by the device core.
+	  The module name will be test_async_driver_probe.ko.
+
+	  If unsure, say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 0000000..90477c5
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)	+= test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 0000000..e7f145d
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY	(5 * 1000)	/* 5 sec */
+#define TEST_PROBE_THRESHOLD	(TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+		 TEST_PROBE_DELAY);
+	msleep(TEST_PROBE_DELAY);
+	dev_info(&pdev->dev, "done sleeping\n");
+
+	return 0;
+}
+
+static struct platform_driver async_driver = {
+	.driver = {
+		.name = "test_async_driver",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+	.driver = {
+		.name = "test_sync_driver",
+		.probe_type = PROBE_FORCE_SYNCHRONOUS,
+	},
+	.probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+	ktime_t calltime, delta;
+	unsigned long long duration;
+	int error;
+
+	pr_info("registering first asynchronous device...\n");
+
+	async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+						      NULL, 0);
+	if (IS_ERR(async_dev_1)) {
+		error = PTR_ERR(async_dev_1);
+		pr_err("failed to create async_dev_1: %d\n", error);
+		return error;
+	}
+
+	pr_info("registering asynchronous driver...\n");
+	calltime = ktime_get();
+	error = platform_driver_register(&async_driver);
+	if (error) {
+		pr_err("Failed to register async_driver: %d\n", error);
+		goto err_unregister_async_dev_1;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration > TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe took too long\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_async_driver;
+	}
+
+	pr_info("registering second asynchronous device...\n");
+	calltime = ktime_get();
+	async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+						      NULL, 0);
+	if (IS_ERR(async_dev_2)) {
+		error = PTR_ERR(async_dev_2);
+		pr_err("failed to create async_dev_2: %d\n", error);
+		goto err_unregister_async_driver;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration > TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe took too long\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_async_dev_2;
+	}
+
+	pr_info("registering synchronous driver...\n");
+
+	error = platform_driver_register(&sync_driver);
+	if (error) {
+		pr_err("Failed to register async_driver: %d\n", error);
+		goto err_unregister_async_dev_2;
+	}
+
+	pr_info("registering synchronous device...\n");
+	calltime = ktime_get();
+	sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+						     NULL, 0);
+	if (IS_ERR(sync_dev_1)) {
+		error = PTR_ERR(sync_dev_1);
+		pr_err("failed to create sync_dev_1: %d\n", error);
+		goto err_unregister_sync_driver;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration < TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe was too quick\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_sync_dev_1;
+	}
+
+	pr_info("completed successfully");
+
+	return 0;
+
+err_unregister_sync_dev_1:
+	platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+	platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+	platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+	platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+	platform_device_unregister(async_dev_1);
+
+	return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+	platform_driver_unregister(&async_driver);
+	platform_driver_unregister(&sync_driver);
+	platform_device_unregister(async_dev_1);
+	platform_device_unregister(async_dev_2);
+	platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
new file mode 100644
index 0000000..5fd9f16
--- /dev/null
+++ b/drivers/base/topology.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/base/topology.c - Populate sysfs with cpu topology information
+ *
+ * Written by: Zhang Yanmin, Intel Corporation
+ *
+ * Copyright (C) 2006, Intel Corp.
+ *
+ * All rights reserved.
+ */
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/topology.h>
+
+#define define_id_show_func(name)				\
+static ssize_t name##_show(struct device *dev,			\
+		struct device_attribute *attr, char *buf)	\
+{								\
+	return sprintf(buf, "%d\n", topology_##name(dev->id));	\
+}
+
+#define define_siblings_show_map(name, mask)				\
+static ssize_t name##_show(struct device *dev,				\
+			   struct device_attribute *attr, char *buf)	\
+{									\
+	return cpumap_print_to_pagebuf(false, buf, topology_##mask(dev->id));\
+}
+
+#define define_siblings_show_list(name, mask)				\
+static ssize_t name##_list_show(struct device *dev,			\
+				struct device_attribute *attr,		\
+				char *buf)				\
+{									\
+	return cpumap_print_to_pagebuf(true, buf, topology_##mask(dev->id));\
+}
+
+#define define_siblings_show_func(name, mask)	\
+	define_siblings_show_map(name, mask);	\
+	define_siblings_show_list(name, mask)
+
+define_id_show_func(physical_package_id);
+static DEVICE_ATTR_RO(physical_package_id);
+
+define_id_show_func(core_id);
+static DEVICE_ATTR_RO(core_id);
+
+define_siblings_show_func(thread_siblings, sibling_cpumask);
+static DEVICE_ATTR_RO(thread_siblings);
+static DEVICE_ATTR_RO(thread_siblings_list);
+
+define_siblings_show_func(core_siblings, core_cpumask);
+static DEVICE_ATTR_RO(core_siblings);
+static DEVICE_ATTR_RO(core_siblings_list);
+
+#ifdef CONFIG_SCHED_BOOK
+define_id_show_func(book_id);
+static DEVICE_ATTR_RO(book_id);
+define_siblings_show_func(book_siblings, book_cpumask);
+static DEVICE_ATTR_RO(book_siblings);
+static DEVICE_ATTR_RO(book_siblings_list);
+#endif
+
+#ifdef CONFIG_SCHED_DRAWER
+define_id_show_func(drawer_id);
+static DEVICE_ATTR_RO(drawer_id);
+define_siblings_show_func(drawer_siblings, drawer_cpumask);
+static DEVICE_ATTR_RO(drawer_siblings);
+static DEVICE_ATTR_RO(drawer_siblings_list);
+#endif
+
+static struct attribute *default_attrs[] = {
+	&dev_attr_physical_package_id.attr,
+	&dev_attr_core_id.attr,
+	&dev_attr_thread_siblings.attr,
+	&dev_attr_thread_siblings_list.attr,
+	&dev_attr_core_siblings.attr,
+	&dev_attr_core_siblings_list.attr,
+#ifdef CONFIG_SCHED_BOOK
+	&dev_attr_book_id.attr,
+	&dev_attr_book_siblings.attr,
+	&dev_attr_book_siblings_list.attr,
+#endif
+#ifdef CONFIG_SCHED_DRAWER
+	&dev_attr_drawer_id.attr,
+	&dev_attr_drawer_siblings.attr,
+	&dev_attr_drawer_siblings_list.attr,
+#endif
+	NULL
+};
+
+static const struct attribute_group topology_attr_group = {
+	.attrs = default_attrs,
+	.name = "topology"
+};
+
+/* Add/Remove cpu_topology interface for CPU device */
+static int topology_add_dev(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	return sysfs_create_group(&dev->kobj, &topology_attr_group);
+}
+
+static int topology_remove_dev(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	sysfs_remove_group(&dev->kobj, &topology_attr_group);
+	return 0;
+}
+
+static int topology_sysfs_init(void)
+{
+	return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+				 "base/topology:prepare", topology_add_dev,
+				 topology_remove_dev);
+}
+
+device_initcall(topology_sysfs_init);
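The attribute group above surfaces per-CPU files under
/sys/devices/system/cpu/cpuN/topology/ (physical_package_id, core_id,
thread_siblings, core_siblings, and their _list variants). A minimal
userspace reader, as an illustration only:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/core_id", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 core_id: %s", buf);	/* value includes a trailing newline */
	fclose(f);
	return 0;
}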
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
new file mode 100644
index 0000000..5ed86de
--- /dev/null
+++ b/drivers/base/transport_class.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * transport_class.c - implementation of generic transport classes
+ *                     using attribute_containers
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * The basic idea here is to allow any "device controller" (which
+ * would most often be a Host Bus Adapter) to use the services of one
+ * or more transport classes for performing transport specific
+ * services.  Transport specific services are things that the generic
+ * command layer doesn't want to know about (speed settings, line
+ * conditioning, etc), but which the user might be interested in.
+ * Thus, the HBAs use the routines exported by the transport classes
+ * to perform these functions.  The transport classes export certain
+ * values to the user via sysfs using attribute containers.
+ *
+ * Note: because not every HBA will care about every transport
+ * attribute, there's a many-to-one relationship that goes like this:
+ *
+ * transport class<-----attribute container<----class device
+ *
+ * Usually the attribute container is per-HBA, but the design doesn't
+ * mandate that.  Although most of the services will be specific to
+ * the actual external storage connection used by the HBA, the generic
+ * transport class is framed entirely in terms of generic devices to
+ * allow it to be used by any physical HBA in the system.
+ */
+#include <linux/export.h>
+#include <linux/attribute_container.h>
+#include <linux/transport_class.h>
+
+/**
+ * transport_class_register - register an initial transport class
+ *
+ * @tclass:	a pointer to the transport class structure to be initialised
+ *
+ * The transport class contains an embedded class which is used to
+ * identify it.  The caller should initialise this structure with
+ * zeros and then initialise the embedded class with the transport
+ * class's unique name.  There's a macro DECLARE_TRANSPORT_CLASS()
+ * to do this (declared classes still must be registered).
+ *
+ * Returns 0 on success or error on failure.
+ */
+int transport_class_register(struct transport_class *tclass)
+{
+	return class_register(&tclass->class);
+}
+EXPORT_SYMBOL_GPL(transport_class_register);
+
+/**
+ * transport_class_unregister - unregister a previously registered class
+ *
+ * @tclass: The transport class to unregister
+ *
+ * Must be called prior to deallocating the memory for the transport
+ * class.
+ */
+void transport_class_unregister(struct transport_class *tclass)
+{
+	class_unregister(&tclass->class);
+}
+EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+static int anon_transport_dummy_function(struct transport_container *tc,
+					 struct device *dev,
+					 struct device *cdev)
+{
+	/* do nothing */
+	return 0;
+}
+
+/**
+ * anon_transport_class_register - register an anonymous class
+ *
+ * @atc: The anon transport class to register
+ *
+ * The anonymous transport class contains both a transport class and a
+ * container.  The idea of an anonymous class is that it never
+ * actually has any device attributes associated with it (and thus
+ * saves on container storage).  So it can only be used for triggering
+ * events.  Zero the structure and then use DECLARE_ANON_TRANSPORT_CLASS()
+ * to initialise the anon transport class storage.
+ */
+int anon_transport_class_register(struct anon_transport_class *atc)
+{
+	int error;
+
+	atc->container.class = &atc->tclass.class;
+	attribute_container_set_no_classdevs(&atc->container);
+	error = attribute_container_register(&atc->container);
+	if (error)
+		return error;
+	atc->tclass.setup = anon_transport_dummy_function;
+	atc->tclass.remove = anon_transport_dummy_function;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(anon_transport_class_register);
+
+/**
+ * anon_transport_class_unregister - unregister an anon class
+ *
+ * @atc: Pointer to the anon transport class to unregister
+ *
+ * Must be called prior to deallocating the memory for the anon
+ * transport class.
+ */
+void anon_transport_class_unregister(struct anon_transport_class *atc)
+{
+	if (unlikely(attribute_container_unregister(&atc->container)))
+		BUG();
+}
+EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+static int transport_setup_classdev(struct attribute_container *cont,
+				    struct device *dev,
+				    struct device *classdev)
+{
+	struct transport_class *tclass = class_to_transport_class(cont->class);
+	struct transport_container *tcont = attribute_container_to_transport_container(cont);
+
+	if (tclass->setup)
+		tclass->setup(tcont, dev, classdev);
+
+	return 0;
+}
+
+/**
+ * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+ * the HBA itself or a device remote across the HBA bus).  This
+ * routine is simply a trigger point to see if any set of transport
+ * classes wishes to associate with the added device.  This allocates
+ * storage for the class device and initialises it, but does not yet
+ * add it to the system or add attributes to it (you do this with
+ * transport_add_device).  If you have no need for a separate setup
+ * and add operations, use transport_register_device (see
+ * transport_class.h).
+ */
+void transport_setup_device(struct device *dev)
+{
+	attribute_container_add_device(dev, transport_setup_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_setup_device);
+
+static int transport_add_class_device(struct attribute_container *cont,
+				      struct device *dev,
+				      struct device *classdev)
+{
+	int error = attribute_container_add_class_device(classdev);
+	struct transport_container *tcont =
+		attribute_container_to_transport_container(cont);
+
+	if (!error && tcont->statistics)
+		error = sysfs_create_group(&classdev->kobj, tcont->statistics);
+
+	return error;
+}
+
+/**
+ * transport_add_device - declare a new dev for transport class association
+ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+ * the HBA itself or a device remote across the HBA bus).  This
+ * routine is simply a trigger point used to add the device to the
+ * system and register attributes for it.
+ */
+void transport_add_device(struct device *dev)
+{
+	attribute_container_device_trigger(dev, transport_add_class_device);
+}
+EXPORT_SYMBOL_GPL(transport_add_device);
+
+static int transport_configure(struct attribute_container *cont,
+			       struct device *dev,
+			       struct device *cdev)
+{
+	struct transport_class *tclass = class_to_transport_class(cont->class);
+	struct transport_container *tcont = attribute_container_to_transport_container(cont);
+
+	if (tclass->configure)
+		tclass->configure(tcont, dev, cdev);
+
+	return 0;
+}
+
+/**
+ * transport_configure_device - configure an already set up device
+ *
+ * @dev: generic device representing device to be configured
+ *
+ * The idea of configure is simply to provide a point within the setup
+ * process to allow the transport class to extract information from a
+ * device after it has been set up.  This is used in SCSI because we
+ * have to have a setup device to begin using the HBA, but after we
+ * send the initial inquiry, we use configure to extract the device
+ * parameters.  The device need not have been added to be configured.
+ */
+void transport_configure_device(struct device *dev)
+{
+	attribute_container_device_trigger(dev, transport_configure);
+}
+EXPORT_SYMBOL_GPL(transport_configure_device);
+
+static int transport_remove_classdev(struct attribute_container *cont,
+				     struct device *dev,
+				     struct device *classdev)
+{
+	struct transport_container *tcont =
+		attribute_container_to_transport_container(cont);
+	struct transport_class *tclass = class_to_transport_class(cont->class);
+
+	if (tclass->remove)
+		tclass->remove(tcont, dev, classdev);
+
+	if (tclass->remove != anon_transport_dummy_function) {
+		if (tcont->statistics)
+			sysfs_remove_group(&classdev->kobj, tcont->statistics);
+		attribute_container_class_device_del(classdev);
+	}
+
+	return 0;
+}
+
+/**
+ * transport_remove_device - remove the visibility of a device
+ *
+ * @dev: generic device to remove
+ *
+ * This call removes the visibility of the device (to the user from
+ * sysfs), but does not destroy it.  To eliminate a device entirely
+ * you must also call transport_destroy_device.  If you don't need to
+ * do remove and destroy as separate operations, use
+ * transport_unregister_device() (see transport_class.h) which will
+ * perform both calls for you.
+ */
+void transport_remove_device(struct device *dev)
+{
+	attribute_container_device_trigger(dev, transport_remove_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_remove_device);
+
+static void transport_destroy_classdev(struct attribute_container *cont,
+				      struct device *dev,
+				      struct device *classdev)
+{
+	struct transport_class *tclass = class_to_transport_class(cont->class);
+
+	if (tclass->remove != anon_transport_dummy_function)
+		put_device(classdev);
+}
+
+/**
+ * transport_destroy_device - destroy a removed device
+ *
+ * @dev: device to eliminate from the transport class.
+ *
+ * This call triggers the elimination of storage associated with the
+ * transport classdev.  Note: all it really does is relinquish a
+ * reference to the classdev.  The memory will not be freed until the
+ * last reference goes to zero.  Note also that the classdev retains a
+ * reference count on dev, so dev too will remain for as long as the
+ * transport class device remains around.
+ */
+void transport_destroy_device(struct device *dev)
+{
+	attribute_container_remove_device(dev, transport_destroy_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_destroy_device);
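A minimal sketch of how an HBA-style driver consumes this API (illustrative
only; the acme_* names are placeholders, not from the tree).
DECLARE_TRANSPORT_CLASS(), transport_register_device() and
transport_unregister_device() are provided by <linux/transport_class.h>:

#include <linux/device.h>
#include <linux/transport_class.h>

static int acme_setup(struct transport_container *tc, struct device *dev,
		      struct device *cdev)
{
	return 0;	/* allocate per-device transport state here */
}

static int acme_configure(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	return 0;	/* interrogate the freshly set-up device here */
}

static int acme_remove(struct transport_container *tc, struct device *dev,
		       struct device *cdev)
{
	return 0;	/* release per-device transport state here */
}

static DECLARE_TRANSPORT_CLASS(acme_transport_class, "acme_transport",
			       acme_setup, acme_remove, acme_configure);

/*
 * Driver init:    transport_class_register(&acme_transport_class);
 * Device probe:   transport_register_device(dev);    (setup + add)
 *                 transport_configure_device(dev);   (after initial inquiry)
 * Device remove:  transport_unregister_device(dev);  (remove + destroy)
 * Driver exit:    transport_class_unregister(&acme_transport_class);
 */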