v4.19.13 snapshot.
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
new file mode 100644
index 0000000..6a172d3
--- /dev/null
+++ b/drivers/devfreq/Kconfig
@@ -0,0 +1,118 @@
+menuconfig PM_DEVFREQ
+	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+	select SRCU
+	select PM_OPP
+	help
+	  A device may have a list of frequencies and voltages available.
+	  devfreq, a generic DVFS framework, can be registered for a device
+	  in order to let the governor provided to devfreq choose an
+	  operating frequency based on the device driver's policy.
+
+	  Each device may have its own governor and policy. Devfreq can
+	  reevaluate the device state periodically and/or based on
+	  notifications delivered to "nb", the notifier block of devfreq.
+
+	  Like some CPUs with CPUfreq, a device may have multiple clocks.
+	  However, because the clock frequencies of a single device are
+	  determined by the single device's state, an instance of devfreq
+	  is attached to a single device and returns a "representative"
+	  clock frequency of the device, which is likewise mapped 1-to-1
+	  to the device. The device registering devfreq takes the
+	  responsibility to "interpret" the representative frequency and
+	  to set every one of its clocks accordingly via the "target"
+	  callback given to devfreq.
+
+	  When OPP is used with the devfreq device, it is recommended to
+	  register devfreq's nb with the OPP's notifier head. In that case,
+	  you may use the OPP helper functions defined in devfreq.h.
+
+if PM_DEVFREQ
+
+comment "DEVFREQ Governors"
+
+config DEVFREQ_GOV_SIMPLE_ONDEMAND
+	tristate "Simple Ondemand"
+	help
+	  Chooses the frequency based on the recent load on the device.
+	  Works similarly to the ONDEMAND governor of CPUFREQ. A device
+	  with Simple-Ondemand should be able to provide busy/total
+	  counter values that imply the usage rate. A device may provide
+	  tuned values to the governor via the data field of
+	  devfreq_add_device().
+
+config DEVFREQ_GOV_PERFORMANCE
+	tristate "Performance"
+	help
+	  Sets the frequency to the maximum available frequency.
+	  This governor always returns UINT_MAX as frequency so that
+	  the DEVFREQ framework returns the highest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_POWERSAVE
+	tristate "Powersave"
+	help
+	  Sets the frequency to the minimum available frequency.
+	  This governor always returns 0 as frequency so that
+	  the DEVFREQ framework returns the lowest frequency available
+	  at any time.
+
+config DEVFREQ_GOV_USERSPACE
+	tristate "Userspace"
+	help
+	  Sets the frequency to the user-specified one.
+	  This governor returns the user-configured frequency if there
+	  has been input to /sys/devices/.../power/devfreq_set_freq.
+	  Otherwise, the governor does not change the frequency
+	  given at initialization.
+
+config DEVFREQ_GOV_PASSIVE
+	tristate "Passive"
+	help
+	  Sets the frequency based on the frequency of its parent devfreq
+	  device. This governor does not change the frequency by itself
+	  through sysfs entries. The passive governor recommends that the
+	  devfreq device use the OPP table to get the frequency/voltage.
+
+comment "DEVFREQ Drivers"
+
+config ARM_EXYNOS_BUS_DEVFREQ
+	tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select DEVFREQ_GOV_PASSIVE
+	select DEVFREQ_EVENT_EXYNOS_PPMU
+	select PM_DEVFREQ_EVENT
+	select PM_OPP
+	help
+	  This adds the common DEVFREQ driver for the Exynos memory bus. The
+	  Exynos memory bus has one or more groups of memory buses (e.g.,
+	  the MIF and INT blocks), and each group may contain many memory
+	  bus blocks. The driver reads the PPMU counters of the memory
+	  controllers by using a DEVFREQ-event device and adjusts the
+	  operating frequencies and voltages with OPP support.
+	  This does not yet operate with optimal voltages.
+
+config ARM_TEGRA_DEVFREQ
+	tristate "Tegra DEVFREQ Driver"
+	depends on ARCH_TEGRA_124_SOC
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select PM_OPP
+	help
+	  This adds the DEVFREQ driver for the Tegra family of SoCs.
+	  It reads ACTMON counters of memory controllers and adjusts the
+	  operating frequencies and voltages with OPP support.
+
+config ARM_RK3399_DMC_DEVFREQ
+	tristate "ARM RK3399 DMC DEVFREQ Driver"
+	depends on ARCH_ROCKCHIP
+	select DEVFREQ_EVENT_ROCKCHIP_DFI
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select PM_DEVFREQ_EVENT
+	select PM_OPP
+	help
+	  This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory
+	  Controller). It sets the frequency for the memory controller and
+	  reads the usage counts from hardware.
+
+source "drivers/devfreq/event/Kconfig"
+
+endif # PM_DEVFREQ
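
For reference, a minimal sketch (not part of the snapshot) of how a driver would register with devfreq using the "target" callback and OPP helpers described in the help text above. The foo_* names, the frequency values, and the "simple_ondemand" governor string are illustrative assumptions; simple_ondemand additionally expects a .get_dev_status callback supplying the busy/total counters mentioned above.

/* Illustrative sketch only (hypothetical foo_* names); assumes the OPP
 * table for @dev has already been populated.
 */
#include <linux/devfreq.h>
#include <linux/pm_opp.h>

static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;

	/* Clamp the requested frequency to a supported OPP. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	/* Device-specific clock/regulator programming for *freq goes here. */
	return 0;
}

static struct devfreq_dev_profile foo_profile = {
	.initial_freq	= 400000000,	/* assumed initial rate in Hz */
	.polling_ms	= 100,
	.target		= foo_target,
	/* .get_dev_status = ..., required by the simple_ondemand governor */
};

/* In probe(): */
/* df = devm_devfreq_add_device(dev, &foo_profile, "simple_ondemand", NULL); */
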
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
new file mode 100644
index 0000000..32b8d4d
--- /dev/null
+++ b/drivers/devfreq/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PM_DEVFREQ)	+= devfreq.o
+obj-$(CONFIG_PM_DEVFREQ_EVENT)	+= devfreq-event.o
+obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)	+= governor_simpleondemand.o
+obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
+obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
+obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
+obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)	+= rk3399_dmc.o
+obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+
+# DEVFREQ Event Drivers
+obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
new file mode 100644
index 0000000..d67242d
--- /dev/null
+++ b/drivers/devfreq/devfreq-event.c
@@ -0,0 +1,483 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/devfreq.c.
+ */
+
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+static struct class *devfreq_event_class;
+
+/* The list of all devfreq-event devices */
+static LIST_HEAD(devfreq_event_list);
+static DEFINE_MUTEX(devfreq_event_list_lock);
+
+#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
+
+/**
+ * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
+ *				 the enable_count of devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function increases the enable_count and enables the
+ * devfreq-event device. The devfreq-event device should be enabled before
+ * it is used by the devfreq device.
+ */
+int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	mutex_lock(&edev->lock);
+	if (edev->desc->ops && edev->desc->ops->enable
+			&& edev->enable_count == 0) {
+		ret = edev->desc->ops->enable(edev);
+		if (ret < 0)
+			goto err;
+	}
+	edev->enable_count++;
+err:
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
+
+/**
+ * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
+ *				  the enable_count of the devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function decreases the enable_count and disables the
+ * devfreq-event device. After the devfreq-event device is disabled,
+ * the devfreq device can't use the devfreq-event device for get/set/reset
+ * operations.
+ */
+int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	mutex_lock(&edev->lock);
+	if (edev->enable_count <= 0) {
+		dev_warn(&edev->dev, "unbalanced enable_count\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	if (edev->desc->ops && edev->desc->ops->disable
+			&& edev->enable_count == 1) {
+		ret = edev->desc->ops->disable(edev);
+		if (ret < 0)
+			goto err;
+	}
+	edev->enable_count--;
+err:
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
+
+/**
+ * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
+ *				not.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function checks whether the devfreq-event dev is enabled.
+ * It returns true if the devfreq-event dev is enabled and false if it is
+ * disabled.
+ */
+bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+	bool enabled = false;
+
+	if (!edev || !edev->desc)
+		return enabled;
+
+	mutex_lock(&edev->lock);
+
+	if (edev->enable_count > 0)
+		enabled = true;
+
+	mutex_unlock(&edev->lock);
+
+	return enabled;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
+
+/**
+ * devfreq_event_set_event() - Set event to devfreq-event dev to start.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function sets the event on the devfreq-event device so that
+ * it starts counting and event data of various types can be gathered.
+ */
+int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+	int ret;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!edev->desc->ops || !edev->desc->ops->set_event)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EPERM;
+
+	mutex_lock(&edev->lock);
+	ret = edev->desc->ops->set_event(edev);
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_set_event);
+
+/**
+ * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
+ * @edev	: the devfreq-event device
+ * @edata	: the calculated data of devfreq-event device
+ *
+ * Note that this function gets the calculated event data from the
+ * devfreq-event dev after stopping its whole counting sequence.
+ */
+int devfreq_event_get_event(struct devfreq_event_dev *edev,
+			    struct devfreq_event_data *edata)
+{
+	int ret;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!edev->desc->ops || !edev->desc->ops->get_event)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EINVAL;
+
+	edata->total_count = edata->load_count = 0;
+
+	mutex_lock(&edev->lock);
+	ret = edev->desc->ops->get_event(edev, edata);
+	if (ret < 0)
+		edata->total_count = edata->load_count = 0;
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_event);
+
+/**
+ * devfreq_event_reset_event() - Reset all operations of the devfreq-event dev.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function stops all operations of the devfreq-event dev and
+ * resets the current event data to return the device to its initial state.
+ */
+int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+	int ret = 0;
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	if (!devfreq_event_is_enabled(edev))
+		return -EPERM;
+
+	mutex_lock(&edev->lock);
+	if (edev->desc->ops && edev->desc->ops->reset)
+		ret = edev->desc->ops->reset(edev);
+	mutex_unlock(&edev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
+
+/**
+ * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
+ *					 devicetree.
+ * @dev		: the pointer to the given device
+ * @index	: the index into the list of devfreq-event devices
+ *
+ * Note that this function returns a pointer to the matching devfreq-event
+ * device.
+ */
+struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
+						      int index)
+{
+	struct device_node *node;
+	struct devfreq_event_dev *edev;
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, "devfreq-events", index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_for_each_entry(edev, &devfreq_event_list, node) {
+		if (edev->dev.parent && edev->dev.parent->of_node == node)
+			goto out;
+	}
+
+	list_for_each_entry(edev, &devfreq_event_list, node) {
+		if (!strcmp(edev->desc->name, node->name))
+			goto out;
+	}
+	edev = NULL;
+out:
+	mutex_unlock(&devfreq_event_list_lock);
+
+	if (!edev) {
+		of_node_put(node);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(node);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
+
+/**
+ * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
+ * @dev		: the pointer to the given device
+ *
+ * Note that this function returns the count of devfreq-event devices.
+ */
+int devfreq_event_get_edev_count(struct device *dev)
+{
+	int count;
+
+	if (!dev->of_node) {
+		dev_err(dev, "device does not have a device node entry\n");
+		return -EINVAL;
+	}
+
+	count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
+						sizeof(u32));
+	if (count < 0) {
+		dev_err(dev,
+			"failed to get the count of devfreq-event in %pOF node\n",
+			dev->of_node);
+		return count;
+	}
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
+
+static void devfreq_event_release_edev(struct device *dev)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	kfree(edev);
+}
+
+/**
+ * devfreq_event_add_edev() - Add new devfreq-event device.
+ * @dev		: the device owning the devfreq-event device being created
+ * @desc	: the devfreq-event device's descriptor which includes essential
+ *		  data for the devfreq-event device.
+ *
+ * Note that this function adds a new devfreq-event device to the devfreq-event
+ * list and registers it with the devfreq-event class.
+ */
+struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+						struct devfreq_event_desc *desc)
+{
+	struct devfreq_event_dev *edev;
+	static atomic_t event_no = ATOMIC_INIT(-1);
+	int ret;
+
+	if (!dev || !desc)
+		return ERR_PTR(-EINVAL);
+
+	if (!desc->name || !desc->ops)
+		return ERR_PTR(-EINVAL);
+
+	if (!desc->ops->set_event || !desc->ops->get_event)
+		return ERR_PTR(-EINVAL);
+
+	edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
+	if (!edev)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&edev->lock);
+	edev->desc = desc;
+	edev->enable_count = 0;
+	edev->dev.parent = dev;
+	edev->dev.class = devfreq_event_class;
+	edev->dev.release = devfreq_event_release_edev;
+
+	dev_set_name(&edev->dev, "event%d", atomic_inc_return(&event_no));
+	ret = device_register(&edev->dev);
+	if (ret < 0) {
+		put_device(&edev->dev);
+		return ERR_PTR(ret);
+	}
+	dev_set_drvdata(&edev->dev, edev);
+
+	INIT_LIST_HEAD(&edev->node);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_add(&edev->node, &devfreq_event_list);
+	mutex_unlock(&devfreq_event_list_lock);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
+
+/**
+ * devfreq_event_remove_edev() - Remove the registered devfreq-event device.
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function removes the registered devfreq-event device.
+ */
+int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+	if (!edev)
+		return -EINVAL;
+
+	WARN_ON(edev->enable_count);
+
+	mutex_lock(&devfreq_event_list_lock);
+	list_del(&edev->node);
+	mutex_unlock(&devfreq_event_list_lock);
+
+	device_unregister(&edev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
+
+static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
+{
+	struct devfreq_event_dev **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+static void devm_devfreq_event_release(struct device *dev, void *res)
+{
+	devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
+}
+
+/**
+ * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
+ * @dev		: the device owning the devfreq-event device being created
+ * @desc	: the devfreq-event device's descriptor which includes essential
+ *		  data for the devfreq-event device.
+ *
+ * Note that this function automatically manages the memory of the
+ * devfreq-event device using device resource management, which simplifies
+ * freeing the devfreq-event device.
+ */
+struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+						struct devfreq_event_desc *desc)
+{
+	struct devfreq_event_dev **ptr, *edev;
+
+	ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr),
+				GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	edev = devfreq_event_add_edev(dev, desc);
+	if (IS_ERR(edev)) {
+		devres_free(ptr);
+		return ERR_CAST(edev);
+	}
+
+	*ptr = edev;
+	devres_add(dev, ptr);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
+
+/**
+ * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
+ * @dev		: the device that owns the devfreq-event device
+ * @edev	: the devfreq-event device
+ *
+ * Note that this function removes the devfreq-event device that was
+ * registered using device resource management.
+ */
+void devm_devfreq_event_remove_edev(struct device *dev,
+				struct devfreq_event_dev *edev)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_event_release,
+			       devm_devfreq_event_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
+
+/*
+ * Device attributes for devfreq-event class.
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", edev->desc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t enable_count_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+	if (!edev || !edev->desc)
+		return -EINVAL;
+
+	return sprintf(buf, "%d\n", edev->enable_count);
+}
+static DEVICE_ATTR_RO(enable_count);
+
+static struct attribute *devfreq_event_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_enable_count.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(devfreq_event);
+
+static int __init devfreq_event_init(void)
+{
+	devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+	if (IS_ERR(devfreq_event_class)) {
+		pr_err("%s: couldn't create class\n", __FILE__);
+		return PTR_ERR(devfreq_event_class);
+	}
+
+	devfreq_event_class->dev_groups = devfreq_event_groups;
+
+	return 0;
+}
+subsys_initcall(devfreq_event_init);
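
For reference, a minimal sketch (not part of the snapshot) of a consumer of the devfreq-event API above: it resolves the event device through the "devfreq-events" phandle handled by devfreq_event_get_edev_by_phandle(), enables it, starts counting, and reads back the load/total counters. foo_read_load() and the surrounding driver are hypothetical.

/* Illustrative consumer sketch (hypothetical foo_read_load). */
#include <linux/device.h>
#include <linux/devfreq-event.h>
#include <linux/err.h>

static int foo_read_load(struct device *dev)
{
	struct devfreq_event_dev *edev;
	struct devfreq_event_data edata;
	int ret;

	/* First "devfreq-events" phandle of this device's DT node. */
	edev = devfreq_event_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devfreq_event_enable_edev(edev);
	if (ret < 0)
		return ret;

	/* Start counting, then collect the busy/total counters. */
	ret = devfreq_event_set_event(edev);
	if (!ret)
		ret = devfreq_event_get_event(edev, &edata);
	if (!ret)
		dev_info(dev, "load %lu / total %lu\n",
			 edata.load_count, edata.total_count);

	devfreq_event_disable_edev(edev);
	return ret;
}
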
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
new file mode 100644
index 0000000..4c49bb1
--- /dev/null
+++ b/drivers/devfreq/devfreq.c
@@ -0,0 +1,1537 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ *	    for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_opp.h>
+#include <linux/devfreq.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/hrtimer.h>
+#include <linux/of.h>
+#include "governor.h"
+
+#define MAX(a,b)	((a) > (b) ? (a) : (b))
+#define MIN(a,b)	((a) < (b) ? (a) : (b))
+
+static struct class *devfreq_class;
+
+/*
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
+ */
+static struct workqueue_struct *devfreq_wq;
+
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
+/* The list of all device-devfreq */
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(devfreq_list_lock);
+
+/**
+ * find_device_devfreq() - find devfreq struct using device pointer
+ * @dev:	device pointer used to lookup device devfreq.
+ *
+ * Search the list of device devfreqs and return the matched device's
+ * devfreq info. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq *find_device_devfreq(struct device *dev)
+{
+	struct devfreq *tmp_devfreq;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
+		if (tmp_devfreq->dev.parent == dev)
+			return tmp_devfreq;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static unsigned long find_available_min_freq(struct devfreq *devfreq)
+{
+	struct dev_pm_opp *opp;
+	unsigned long min_freq = 0;
+
+	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
+	if (IS_ERR(opp))
+		min_freq = 0;
+	else
+		dev_pm_opp_put(opp);
+
+	return min_freq;
+}
+
+static unsigned long find_available_max_freq(struct devfreq *devfreq)
+{
+	struct dev_pm_opp *opp;
+	unsigned long max_freq = ULONG_MAX;
+
+	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
+	if (IS_ERR(opp))
+		max_freq = 0;
+	else
+		dev_pm_opp_put(opp);
+
+	return max_freq;
+}
+
+/**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq:	the devfreq instance
+ * @freq:	the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+static int set_freq_table(struct devfreq *devfreq)
+{
+	struct devfreq_dev_profile *profile = devfreq->profile;
+	struct dev_pm_opp *opp;
+	unsigned long freq;
+	int i, count;
+
+	/* Initialize the freq_table from OPP table */
+	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
+	if (count <= 0)
+		return -EINVAL;
+
+	profile->max_state = count;
+	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
+					profile->max_state,
+					sizeof(*profile->freq_table),
+					GFP_KERNEL);
+	if (!profile->freq_table) {
+		profile->max_state = 0;
+		return -ENOMEM;
+	}
+
+	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
+		if (IS_ERR(opp)) {
+			devm_kfree(devfreq->dev.parent, profile->freq_table);
+			profile->max_state = 0;
+			return PTR_ERR(opp);
+		}
+		dev_pm_opp_put(opp);
+		profile->freq_table[i] = freq;
+	}
+
+	return 0;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq:	the devfreq instance
+ * @freq:	the update target frequency
+ */
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev, prev_lev, ret = 0;
+	unsigned long cur_time;
+
+	cur_time = jiffies;
+
+	/* Immediately exit if previous_freq is not initialized yet. */
+	if (!devfreq->previous_freq)
+		goto out;
+
+	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+	if (prev_lev < 0) {
+		ret = prev_lev;
+		goto out;
+	}
+
+	devfreq->time_in_state[prev_lev] +=
+			 cur_time - devfreq->last_stat_updated;
+
+	lev = devfreq_get_freq_level(devfreq, freq);
+	if (lev < 0) {
+		ret = lev;
+		goto out;
+	}
+
+	if (lev != prev_lev) {
+		devfreq->trans_table[(prev_lev *
+				devfreq->profile->max_state) + lev]++;
+		devfreq->total_trans++;
+	}
+
+out:
+	devfreq->last_stat_updated = cur_time;
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_update_status);
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name:	name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+	struct devfreq_governor *tmp_governor;
+
+	if (IS_ERR_OR_NULL(name)) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+			return tmp_governor;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static int devfreq_notify_transition(struct devfreq *devfreq,
+		struct devfreq_freqs *freqs, unsigned int state)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (state) {
+	case DEVFREQ_PRECHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_PRECHANGE, freqs);
+		break;
+
+	case DEVFREQ_POSTCHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_POSTCHANGE, freqs);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency.
+ * @devfreq:	the devfreq instance.
+ *
+ * Note: Lock devfreq->lock before calling update_devfreq
+ *	 This function is exported for governors.
+ */
+int update_devfreq(struct devfreq *devfreq)
+{
+	struct devfreq_freqs freqs;
+	unsigned long freq, cur_freq, min_freq, max_freq;
+	int err = 0;
+	u32 flags = 0;
+
+	if (!mutex_is_locked(&devfreq->lock)) {
+		WARN(true, "devfreq->lock must be locked by the caller.\n");
+		return -EINVAL;
+	}
+
+	if (!devfreq->governor)
+		return -EINVAL;
+
+	/* Reevaluate the proper frequency */
+	err = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (err)
+		return err;
+
+	/*
+	 * Adjust the frequency with user freq, QoS and available freq.
+	 *
+	 * List from the highest priority
+	 * max_freq
+	 * min_freq
+	 */
+	max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
+	min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
+
+	if (min_freq && freq < min_freq) {
+		freq = min_freq;
+		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
+	}
+	if (max_freq && freq > max_freq) {
+		freq = max_freq;
+		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
+	}
+
+	if (devfreq->profile->get_cur_freq)
+		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+	else
+		cur_freq = devfreq->previous_freq;
+
+	freqs.old = cur_freq;
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
+	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
+	if (err) {
+		freqs.new = cur_freq;
+		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+		return err;
+	}
+
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
+	if (devfreq_update_status(devfreq, freq))
+		dev_err(&devfreq->dev,
+			"Couldn't update frequency transition information.\n");
+
+	devfreq->previous_freq = freq;
+	return err;
+}
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work:	the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+	int err;
+	struct devfreq *devfreq = container_of(work,
+					struct devfreq, work.work);
+
+	mutex_lock(&devfreq->lock);
+	err = update_devfreq(devfreq);
+	if (err)
+		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+	queue_delayed_work(devfreq_wq, &devfreq->work,
+				msecs_to_jiffies(devfreq->profile->polling_ms));
+	mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+	if (devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is the same as devfreq_monitor_stop(),
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (devfreq->stop_polling) {
+		mutex_unlock(&devfreq->lock);
+		return;
+	}
+
+	devfreq_update_status(devfreq, devfreq->previous_freq);
+	devfreq->stop_polling = true;
+	mutex_unlock(&devfreq->lock);
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq:    the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+	unsigned long freq;
+
+	mutex_lock(&devfreq->lock);
+	if (!devfreq->stop_polling)
+		goto out;
+
+	if (!delayed_work_pending(&devfreq->work) &&
+			devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+
+	devfreq->last_stat_updated = jiffies;
+	devfreq->stop_polling = false;
+
+	if (devfreq->profile->get_cur_freq &&
+		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+		devfreq->previous_freq = freq;
+
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq:    the devfreq instance.
+ * @delay:      new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+	unsigned int cur_delay = devfreq->profile->polling_ms;
+	unsigned int new_delay = *delay;
+
+	mutex_lock(&devfreq->lock);
+	devfreq->profile->polling_ms = new_delay;
+
+	if (devfreq->stop_polling)
+		goto out;
+
+	/* if new delay is zero, stop polling */
+	if (!new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		return;
+	}
+
+	/* if current delay is zero, start polling with new delay */
+	if (!cur_delay) {
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+		goto out;
+	}
+
+	/* if current delay is greater than new delay, restart polling */
+	if (cur_delay > new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		mutex_lock(&devfreq->lock);
+		if (!devfreq->stop_polling)
+			queue_delayed_work(devfreq_wq, &devfreq->work,
+			      msecs_to_jiffies(devfreq->profile->polling_ms));
+	}
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
+
+/**
+ * devfreq_notifier_call() - Notify that the device frequency requirements
+ *			   have been changed outside of the devfreq framework.
+ * @nb:		the notifier_block (supposed to be devfreq->nb)
+ * @type:	not used
+ * @devp:	not used
+ *
+ * Called by a notifier that uses devfreq->nb.
+ */
+static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
+				 void *devp)
+{
+	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+	int ret;
+
+	mutex_lock(&devfreq->lock);
+
+	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+	if (!devfreq->scaling_min_freq) {
+		mutex_unlock(&devfreq->lock);
+		return -EINVAL;
+	}
+
+	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+	if (!devfreq->scaling_max_freq) {
+		mutex_unlock(&devfreq->lock);
+		return -EINVAL;
+	}
+
+	ret = update_devfreq(devfreq);
+	mutex_unlock(&devfreq->lock);
+
+	return ret;
+}
+
+/**
+ * devfreq_dev_release() - Callback for struct device to release the device.
+ * @dev:	the devfreq device
+ *
+ * Remove devfreq from the list and release its resources.
+ */
+static void devfreq_dev_release(struct device *dev)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+
+	mutex_lock(&devfreq_list_lock);
+	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+		mutex_unlock(&devfreq_list_lock);
+		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
+		return;
+	}
+	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
+
+	if (devfreq->governor)
+		devfreq->governor->event_handler(devfreq,
+						 DEVFREQ_GOV_STOP, NULL);
+
+	if (devfreq->profile->exit)
+		devfreq->profile->exit(devfreq->dev.parent);
+
+	mutex_destroy(&devfreq->lock);
+	kfree(devfreq);
+}
+
+/**
+ * devfreq_add_device() - Add devfreq feature to the device
+ * @dev:	the device to add devfreq feature.
+ * @profile:	device-specific profile to run devfreq.
+ * @governor_name:	name of the policy to choose frequency.
+ * @data:	private data for the governor. The devfreq framework does not
+ *		touch this value.
+ */
+struct devfreq *devfreq_add_device(struct device *dev,
+				   struct devfreq_dev_profile *profile,
+				   const char *governor_name,
+				   void *data)
+{
+	struct devfreq *devfreq;
+	struct devfreq_governor *governor;
+	static atomic_t devfreq_no = ATOMIC_INIT(-1);
+	int err = 0;
+
+	if (!dev || !profile || !governor_name) {
+		dev_err(dev, "%s: Invalid parameters.\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	devfreq = find_device_devfreq(dev);
+	mutex_unlock(&devfreq_list_lock);
+	if (!IS_ERR(devfreq)) {
+		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+			__func__);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
+	if (!devfreq) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mutex_init(&devfreq->lock);
+	mutex_lock(&devfreq->lock);
+	devfreq->dev.parent = dev;
+	devfreq->dev.class = devfreq_class;
+	devfreq->dev.release = devfreq_dev_release;
+	devfreq->profile = profile;
+	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+	devfreq->previous_freq = profile->initial_freq;
+	devfreq->last_status.current_frequency = profile->initial_freq;
+	devfreq->data = data;
+	devfreq->nb.notifier_call = devfreq_notifier_call;
+
+	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+		mutex_unlock(&devfreq->lock);
+		err = set_freq_table(devfreq);
+		if (err < 0)
+			goto err_out;
+		mutex_lock(&devfreq->lock);
+	}
+
+	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+	if (!devfreq->scaling_min_freq) {
+		mutex_unlock(&devfreq->lock);
+		err = -EINVAL;
+		goto err_dev;
+	}
+	devfreq->min_freq = devfreq->scaling_min_freq;
+
+	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+	if (!devfreq->scaling_max_freq) {
+		mutex_unlock(&devfreq->lock);
+		err = -EINVAL;
+		goto err_dev;
+	}
+	devfreq->max_freq = devfreq->scaling_max_freq;
+
+	dev_set_name(&devfreq->dev, "devfreq%d",
+				atomic_inc_return(&devfreq_no));
+	err = device_register(&devfreq->dev);
+	if (err) {
+		mutex_unlock(&devfreq->lock);
+		put_device(&devfreq->dev);
+		goto err_out;
+	}
+
+	devfreq->trans_table =
+		devm_kzalloc(&devfreq->dev,
+			     array3_size(sizeof(unsigned int),
+					 devfreq->profile->max_state,
+					 devfreq->profile->max_state),
+			     GFP_KERNEL);
+	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+						devfreq->profile->max_state,
+						sizeof(unsigned long),
+						GFP_KERNEL);
+	devfreq->last_stat_updated = jiffies;
+
+	srcu_init_notifier_head(&devfreq->transition_notifier_list);
+
+	mutex_unlock(&devfreq->lock);
+
+	mutex_lock(&devfreq_list_lock);
+	list_add(&devfreq->node, &devfreq_list);
+
+	governor = find_devfreq_governor(devfreq->governor_name);
+	if (IS_ERR(governor)) {
+		dev_err(dev, "%s: Unable to find governor for the device\n",
+			__func__);
+		err = PTR_ERR(governor);
+		goto err_init;
+	}
+
+	devfreq->governor = governor;
+	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+						NULL);
+	if (err) {
+		dev_err(dev, "%s: Unable to start governor for the device\n",
+			__func__);
+		goto err_init;
+	}
+	mutex_unlock(&devfreq_list_lock);
+
+	return devfreq;
+
+err_init:
+	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
+
+	device_unregister(&devfreq->dev);
+	devfreq = NULL;
+err_dev:
+	if (devfreq)
+		kfree(devfreq);
+err_out:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(devfreq_add_device);
+
+/**
+ * devfreq_remove_device() - Remove devfreq feature from a device.
+ * @devfreq:	the devfreq instance to be removed
+ *
+ * The opposite of devfreq_add_device().
+ */
+int devfreq_remove_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	device_unregister(&devfreq->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
+{
+	struct devfreq **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+static void devm_devfreq_dev_release(struct device *dev, void *res)
+{
+	devfreq_remove_device(*(struct devfreq **)res);
+}
+
+/**
+ * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
+ * @dev:	the device to add devfreq feature.
+ * @profile:	device-specific profile to run devfreq.
+ * @governor_name:	name of the policy to choose frequency.
+ * @data:	private data for the governor. The devfreq framework does not
+ *		touch this value.
+ *
+ * This function automatically manages the memory of the devfreq device using
+ * device resource management, which simplifies freeing the devfreq device.
+ */
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+					struct devfreq_dev_profile *profile,
+					const char *governor_name,
+					void *data)
+{
+	struct devfreq **ptr, *devfreq;
+
+	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	devfreq = devfreq_add_device(dev, profile, governor_name, data);
+	if (IS_ERR(devfreq)) {
+		devres_free(ptr);
+		return devfreq;
+	}
+
+	*ptr = devfreq;
+	devres_add(dev, ptr);
+
+	return devfreq;
+}
+EXPORT_SYMBOL(devm_devfreq_add_device);
+
+#ifdef CONFIG_OF
+/*
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev - the pointer to the given device
+ * @index - the index into the list of devfreq devices
+ *
+ * Return the instance of the devfreq device.
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	struct device_node *node;
+	struct devfreq *devfreq;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, "devfreq", index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		if (devfreq->dev.parent
+			&& devfreq->dev.parent->of_node == node) {
+			mutex_unlock(&devfreq_list_lock);
+			of_node_put(node);
+			return devfreq;
+		}
+	}
+	mutex_unlock(&devfreq_list_lock);
+	of_node_put(node);
+
+	return ERR_PTR(-EPROBE_DEFER);
+}
+#else
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
+
+/**
+ * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
+ * @dev:	the device to add devfreq feature.
+ * @devfreq:	the devfreq instance to be removed
+ */
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ *
+ * This function is intended to be called by the pm callbacks
+ * (e.g., runtime_suspend, suspend) of the device driver that
+ * holds the devfreq.
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ *
+ * This function is intended to be called by the pm callbacks
+ * (e.g., runtime_resume, resume) of the device driver that
+ * holds the devfreq.
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor:	the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (!IS_ERR(g)) {
+		pr_err("%s: governor %s already registered\n", __func__,
+		       g->name);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	list_add(&governor->node, &devfreq_governor_list);
+
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret = 0;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* The following should never occur */
+			if (devfreq->governor) {
+				dev_warn(dev,
+					 "%s: Governor %s already present\n",
+					 __func__, devfreq->governor->name);
+				ret = devfreq->governor->event_handler(devfreq,
+							DEVFREQ_GOV_STOP, NULL);
+				if (ret) {
+					dev_warn(dev,
+						 "%s: Governor %s stop = %d\n",
+						 __func__,
+						 devfreq->governor->name, ret);
+				}
+				/* Fall through */
+			}
+			devfreq->governor = governor;
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_START, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s start=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+		}
+	}
+
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
+
+/**
+ * devfreq_remove_governor() - Remove the devfreq governor.
+ * @governor:	the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (IS_ERR(g)) {
+		pr_err("%s: governor %s not registered\n", __func__,
+		       governor->name);
+		err = PTR_ERR(g);
+		goto err_out;
+	}
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* we should have a devfreq governor! */
+			if (!devfreq->governor) {
+				dev_warn(dev, "%s: Governor %s NOT present\n",
+					 __func__, governor->name);
+				continue;
+				/* Fall through */
+			}
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_STOP, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s stop=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+			devfreq->governor = NULL;
+		}
+	}
+
+	list_del(&governor->node);
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(devfreq_remove_governor);
+
+static ssize_t governor_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	if (!to_devfreq(dev)->governor)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+}
+
+static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	int ret;
+	char str_governor[DEVFREQ_NAME_LEN + 1];
+	struct devfreq_governor *governor;
+
+	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&devfreq_list_lock);
+	governor = find_devfreq_governor(str_governor);
+	if (IS_ERR(governor)) {
+		ret = PTR_ERR(governor);
+		goto out;
+	}
+	if (df->governor == governor) {
+		ret = 0;
+		goto out;
+	} else if ((df->governor && df->governor->immutable) ||
+					governor->immutable) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (df->governor) {
+		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+		if (ret) {
+			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+				 __func__, df->governor->name, ret);
+			goto out;
+		}
+	}
+	df->governor = governor;
+	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+	if (ret)
+		dev_warn(dev, "%s: Governor %s not started(%d)\n",
+			 __func__, df->governor->name, ret);
+out:
+	mutex_unlock(&devfreq_list_lock);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+static DEVICE_ATTR_RW(governor);
+
+static ssize_t available_governors_show(struct device *d,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	ssize_t count = 0;
+
+	mutex_lock(&devfreq_list_lock);
+
+	/*
+	 * A devfreq device with an immutable governor (e.g., passive) shows
+	 * only its own governor.
+	 */
+	if (df->governor->immutable) {
+		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
+				   "%s ", df->governor_name);
+	/*
+	 * Otherwise, the devfreq device shows the registered governors
+	 * except for immutable governors such as the passive governor.
+	 */
+	} else {
+		struct devfreq_governor *governor;
+
+		list_for_each_entry(governor, &devfreq_governor_list, node) {
+			if (governor->immutable)
+				continue;
+			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+					   "%s ", governor->name);
+		}
+	}
+
+	mutex_unlock(&devfreq_list_lock);
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+static DEVICE_ATTR_RO(available_governors);
+
+static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	unsigned long freq;
+	struct devfreq *devfreq = to_devfreq(dev);
+
+	if (devfreq->profile->get_cur_freq &&
+		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+		return sprintf(buf, "%lu\n", freq);
+
+	return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+static DEVICE_ATTR_RO(cur_freq);
+
+static ssize_t target_freq_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+}
+static DEVICE_ATTR_RO(target_freq);
+
+static ssize_t polling_interval_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+}
+
+static ssize_t polling_interval_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned int value;
+	int ret;
+
+	if (!df->governor)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%u", &value);
+	if (ret != 1)
+		return -EINVAL;
+
+	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
+	ret = count;
+
+	return ret;
+}
+static DEVICE_ATTR_RW(polling_interval);
+
+static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned long value;
+	int ret;
+	unsigned long max;
+
+	ret = sscanf(buf, "%lu", &value);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&df->lock);
+	max = df->max_freq;
+	if (value && max && value > max) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	df->min_freq = value;
+	update_devfreq(df);
+	ret = count;
+unlock:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
+}
+
+static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	unsigned long value;
+	int ret;
+	unsigned long min;
+
+	ret = sscanf(buf, "%lu", &value);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&df->lock);
+	min = df->min_freq;
+	if (value && min && value < min) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	df->max_freq = value;
+	update_devfreq(df);
+	ret = count;
+unlock:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(min_freq);
+
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
+}
+static DEVICE_ATTR_RW(max_freq);
+
+static ssize_t available_frequencies_show(struct device *d,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	ssize_t count = 0;
+	int i;
+
+	mutex_lock(&df->lock);
+
+	for (i = 0; i < df->profile->max_state; i++)
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				"%lu ", df->profile->freq_table[i]);
+
+	mutex_unlock(&df->lock);
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+static DEVICE_ATTR_RO(available_frequencies);
+
+static ssize_t trans_stat_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	ssize_t len;
+	int i, j;
+	unsigned int max_state = devfreq->profile->max_state;
+
+	if (!devfreq->stop_polling &&
+			devfreq_update_status(devfreq, devfreq->previous_freq))
+		return 0;
+	if (max_state == 0)
+		return sprintf(buf, "Not Supported.\n");
+
+	len = sprintf(buf, "     From  :   To\n");
+	len += sprintf(buf + len, "           :");
+	for (i = 0; i < max_state; i++)
+		len += sprintf(buf + len, "%10lu",
+				devfreq->profile->freq_table[i]);
+
+	len += sprintf(buf + len, "   time(ms)\n");
+
+	for (i = 0; i < max_state; i++) {
+		if (devfreq->profile->freq_table[i]
+					== devfreq->previous_freq) {
+			len += sprintf(buf + len, "*");
+		} else {
+			len += sprintf(buf + len, " ");
+		}
+		len += sprintf(buf + len, "%10lu:",
+				devfreq->profile->freq_table[i]);
+		for (j = 0; j < max_state; j++)
+			len += sprintf(buf + len, "%10u",
+				devfreq->trans_table[(i * max_state) + j]);
+		len += sprintf(buf + len, "%10u\n",
+			jiffies_to_msecs(devfreq->time_in_state[i]));
+	}
+
+	len += sprintf(buf + len, "Total transition : %u\n",
+					devfreq->total_trans);
+	return len;
+}
+static DEVICE_ATTR_RO(trans_stat);
+
+static struct attribute *devfreq_attrs[] = {
+	&dev_attr_governor.attr,
+	&dev_attr_available_governors.attr,
+	&dev_attr_cur_freq.attr,
+	&dev_attr_available_frequencies.attr,
+	&dev_attr_target_freq.attr,
+	&dev_attr_polling_interval.attr,
+	&dev_attr_min_freq.attr,
+	&dev_attr_max_freq.attr,
+	&dev_attr_trans_stat.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(devfreq);
+
+static int __init devfreq_init(void)
+{
+	devfreq_class = class_create(THIS_MODULE, "devfreq");
+	if (IS_ERR(devfreq_class)) {
+		pr_err("%s: couldn't create class\n", __FILE__);
+		return PTR_ERR(devfreq_class);
+	}
+
+	devfreq_wq = create_freezable_workqueue("devfreq_wq");
+	if (!devfreq_wq) {
+		class_destroy(devfreq_class);
+		pr_err("%s: couldn't create workqueue\n", __FILE__);
+		return -ENOMEM;
+	}
+	devfreq_class->dev_groups = devfreq_groups;
+
+	return 0;
+}
+subsys_initcall(devfreq_init);
+
+/*
+ * The following are helper functions for devfreq user device drivers with
+ * OPP framework.
+ */
+
+/**
+ * devfreq_recommended_opp() - Helper function to get proper OPP for the
+ *			     freq value given to target callback.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @freq:	The frequency given to target function
+ * @flags:	Flags handed from devfreq framework.
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+					   unsigned long *freq,
+					   u32 flags)
+{
+	struct dev_pm_opp *opp;
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
+		/* The freq is an upper bound. opp should be lower */
+		opp = dev_pm_opp_find_freq_floor(dev, freq);
+
+		/* If not available, use the closest opp */
+		if (opp == ERR_PTR(-ERANGE))
+			opp = dev_pm_opp_find_freq_ceil(dev, freq);
+	} else {
+		/* The freq is a lower bound. opp should be higher */
+		opp = dev_pm_opp_find_freq_ceil(dev, freq);
+
+		/* If not available, use the closest opp */
+		if (opp == ERR_PTR(-ERANGE))
+			opp = dev_pm_opp_find_freq_floor(dev, freq);
+	}
+
+	return opp;
+}
+EXPORT_SYMBOL(devfreq_recommended_opp);
+
+/**
+ * devfreq_register_opp_notifier() - Helper function to get devfreq notified
+ *				   of any changes in OPP availability
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ */
+int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
+}
+EXPORT_SYMBOL(devfreq_register_opp_notifier);
+
+/**
+ * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
+ *				     notified of any changes in OPP
+ *				     availability.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ *
+ * This must be called in the exit() callback of devfreq_dev_profile if
+ * devfreq_recommended_opp is used.
+ */
+int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
+{
+	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
+}
+EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
+
+static void devm_devfreq_opp_release(struct device *dev, void *res)
+{
+	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
+}
+
+/**
+ * devm_devfreq_register_opp_notifier()
+ *		- Resource-managed devfreq_register_opp_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ */
+int devm_devfreq_register_opp_notifier(struct device *dev,
+				       struct devfreq *devfreq)
+{
+	struct devfreq **ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = devfreq_register_opp_notifier(dev, devfreq);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	*ptr = devfreq;
+	devres_add(dev, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
+
+/**
+ * devm_devfreq_unregister_opp_notifier()
+ *		- Resource-managed devfreq_unregister_opp_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ */
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+					 struct devfreq *devfreq)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
+
+/**
+ * devfreq_register_notifier() - Register a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to register.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_register_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	int ret = 0;
+
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		ret = srcu_notifier_chain_register(
+				&devfreq->transition_notifier_list, nb);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_register_notifier);
+
+/**
+ * devfreq_unregister_notifier() - Unregister a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	int ret = 0;
+
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		ret = srcu_notifier_chain_unregister(
+				&devfreq->transition_notifier_list, nb);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_unregister_notifier);
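+
+/*
+ * Illustrative sketch of a consumer (the callback name is hypothetical):
+ * a DEVFREQ_TRANSITION_NOTIFIER callback receives DEVFREQ_PRECHANGE and
+ * DEVFREQ_POSTCHANGE events with a struct devfreq_freqs payload, e.g.
+ *
+ *	static int foo_devfreq_notify(struct notifier_block *nb,
+ *				      unsigned long event, void *data)
+ *	{
+ *		struct devfreq_freqs *freqs = data;
+ *
+ *		if (event == DEVFREQ_POSTCHANGE)
+ *			pr_debug("%lu -> %lu\n", freqs->old, freqs->new);
+ *
+ *		return NOTIFY_OK;
+ *	}
+ */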
+
+struct devfreq_notifier_devres {
+	struct devfreq *devfreq;
+	struct notifier_block *nb;
+	unsigned int list;
+};
+
+static void devm_devfreq_notifier_release(struct device *dev, void *res)
+{
+	struct devfreq_notifier_devres *this = res;
+
+	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
+}
+
+/**
+ * devm_devfreq_register_notifier()
+ *		- Resource-managed devfreq_register_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to register.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devm_devfreq_register_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	struct devfreq_notifier_devres *ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
+				GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = devfreq_register_notifier(devfreq, nb, list);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	ptr->devfreq = devfreq;
+	ptr->nb = nb;
+	ptr->list = list;
+	devres_add(dev, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+/**
+ * devm_devfreq_unregister_notifier()
+ *		- Resource-managed devfreq_unregister_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+void devm_devfreq_unregister_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
new file mode 100644
index 0000000..cd94980
--- /dev/null
+++ b/drivers/devfreq/event/Kconfig
@@ -0,0 +1,41 @@
+menuconfig PM_DEVFREQ_EVENT
+	bool "DEVFREQ-Event device Support"
+	help
+	  The devfreq-event device provides the raw data and events that
+	  indicate the current state of the monitored device. The data
+	  provided by the devfreq-event device is used to monitor the state
+	  of the device and to determine a suitable amount of resources,
+	  reducing resource waste.
+
+	  The devfreq-event device can support various types of events
+	  (e.g., raw data, utilization, latency, bandwidth). These events
+	  may be used by devfreq governors and other subsystems.
+
+if PM_DEVFREQ_EVENT
+
+config DEVFREQ_EVENT_EXYNOS_NOCP
+	tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select PM_OPP
+	select REGMAP_MMIO
+	help
+	  This adds the devfreq-event driver for Exynos SoCs. It provides NoC
+	  (Network on Chip) Probe counters to measure the bandwidth of the AXI bus.
+
+config DEVFREQ_EVENT_EXYNOS_PPMU
+	tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	select PM_OPP
+	help
+	  This adds the devfreq-event driver for Exynos SoCs. It provides PPMU
+	  (Platform Performance Monitoring Unit) counters to estimate the
+	  utilization of each module.
+
+config DEVFREQ_EVENT_ROCKCHIP_DFI
+	tristate "ROCKCHIP DFI DEVFREQ event Driver"
+	depends on ARCH_ROCKCHIP
+	help
+	  This adds the devfreq-event driver for Rockchip SoCs. It provides a DFI
+	  (DDR Monitor Module) driver to count the DDR load.
+
+endif # PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
new file mode 100644
index 0000000..dda7090
--- /dev/null
+++ b/drivers/devfreq/event/Makefile
@@ -0,0 +1,5 @@
+# DEVFREQ event drivers
+
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
+obj-$(CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI) += rockchip-dfi.o
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
new file mode 100644
index 0000000..f6e7956
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -0,0 +1,303 @@
+/*
+ * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "exynos-nocp.h"
+
+struct exynos_nocp {
+	struct devfreq_event_dev *edev;
+	struct devfreq_event_desc desc;
+
+	struct device *dev;
+
+	struct regmap *regmap;
+	struct clk *clk;
+};
+
+/*
+ * The devfreq-event ops structure for nocp probe.
+ */
+static int exynos_nocp_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	int ret;
+
+	/* Disable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0);
+	if (ret < 0) {
+		dev_err(nocp->dev, "failed to disable the NoC probe device\n");
+		return ret;
+	}
+
+	/* Set a statistics dump period to 0 */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set the IntEvent fields of *_SRC */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CYCLE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+
+	/* Set an alarm with a max/min value of 0 to generate StatALARM */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set AlarmMode */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable the measurements by setting AlarmEn and StatEn */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Set GlobalEN */
+	ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL,
+				NOCP_CFG_CTL_GLOBALEN_MASK,
+				NOCP_CFG_CTL_GLOBALEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK,
+				NOCP_MAIN_CTL_STATEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	/* Reset NoC probe */
+	if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0)) {
+		dev_err(nocp->dev, "Failed to reset NoC probe device\n");
+	}
+
+	return ret;
+}
+
+static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
+				struct devfreq_event_data *edata)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	unsigned int counter[4];
+	int ret;
+
+	/* Read cycle count */
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]);
+	if (ret < 0)
+		goto out;
+
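+	/*
+	 * Counters 1 and 3 were configured as chain counters in
+	 * exynos_nocp_set_event(), so they carry the upper 16 bits of the
+	 * byte count and the cycle count respectively.
+	 */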
+	edata->load_count = ((counter[1] << 16) | counter[0]);
+	edata->total_count = ((counter[3] << 16) | counter[2]);
+
+	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+
+	return 0;
+
+out:
+	dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
+
+	return ret;
+}
+
+static const struct devfreq_event_ops exynos_nocp_ops = {
+	.set_event = exynos_nocp_set_event,
+	.get_event = exynos_nocp_get_event,
+};
+
+static const struct of_device_id exynos_nocp_id_match[] = {
+	{ .compatible = "samsung,exynos5420-nocp", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_nocp_id_match);
+
+static struct regmap_config exynos_nocp_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = NOCP_COUNTERS_3_VAL,
+};
+
+static int exynos_nocp_parse_dt(struct platform_device *pdev,
+				struct exynos_nocp *nocp)
+{
+	struct device *dev = nocp->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	nocp->clk = devm_clk_get(dev, "nocp");
+	if (IS_ERR(nocp->clk))
+		nocp->clk = NULL;
+
+	/* Map the memory-mapped I/O used to control the NoC Probe registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	exynos_nocp_regmap_config.max_register = resource_size(res) - 4;
+
+	nocp->regmap = devm_regmap_init_mmio(dev, base,
+					&exynos_nocp_regmap_config);
+	if (IS_ERR(nocp->regmap)) {
+		dev_err(dev, "failed to initialize regmap\n");
+		return PTR_ERR(nocp->regmap);
+	}
+
+	return 0;
+}
+
+static int exynos_nocp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct exynos_nocp *nocp;
+	int ret;
+
+	nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL);
+	if (!nocp)
+		return -ENOMEM;
+
+	nocp->dev = &pdev->dev;
+
+	/* Parse dt data to get resource */
+	ret = exynos_nocp_parse_dt(pdev, nocp);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"failed to parse devicetree for resource\n");
+		return ret;
+	}
+
+	/* Add devfreq-event device to measure the bandwidth of NoC */
+	nocp->desc.ops = &exynos_nocp_ops;
+	nocp->desc.driver_data = nocp;
+	nocp->desc.name = np->full_name;
+	nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc);
+	if (IS_ERR(nocp->edev)) {
+		dev_err(&pdev->dev,
+			"failed to add devfreq-event device\n");
+		return PTR_ERR(nocp->edev);
+	}
+	platform_set_drvdata(pdev, nocp);
+
+	ret = clk_prepare_enable(nocp->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare nocp clock\n");
+		return ret;
+	}
+
+	pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
+			dev_name(dev));
+
+	return 0;
+}
+
+static int exynos_nocp_remove(struct platform_device *pdev)
+{
+	struct exynos_nocp *nocp = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(nocp->clk);
+
+	return 0;
+}
+
+static struct platform_driver exynos_nocp_driver = {
+	.probe	= exynos_nocp_probe,
+	.remove	= exynos_nocp_remove,
+	.driver = {
+		.name	= "exynos-nocp",
+		.of_match_table = exynos_nocp_id_match,
+	},
+};
+module_platform_driver(exynos_nocp_driver);
+
+MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
new file mode 100644
index 0000000..28564db
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -0,0 +1,78 @@
+/*
+ * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_NOCP_H__
+#define __EXYNOS_NOCP_H__
+
+enum nocp_reg {
+	NOCP_ID_REVISION_ID		= 0x04,
+	NOCP_MAIN_CTL			= 0x08,
+	NOCP_CFG_CTL			= 0x0C,
+
+	NOCP_STAT_PERIOD		= 0x24,
+	NOCP_STAT_GO			= 0x28,
+	NOCP_STAT_ALARM_MIN		= 0x2C,
+	NOCP_STAT_ALARM_MAX		= 0x30,
+	NOCP_STAT_ALARM_STATUS		= 0x34,
+	NOCP_STAT_ALARM_CLR		= 0x38,
+
+	NOCP_COUNTERS_0_SRC		= 0x138,
+	NOCP_COUNTERS_0_ALARM_MODE	= 0x13C,
+	NOCP_COUNTERS_0_VAL		= 0x140,
+
+	NOCP_COUNTERS_1_SRC		= 0x14C,
+	NOCP_COUNTERS_1_ALARM_MODE	= 0x150,
+	NOCP_COUNTERS_1_VAL		= 0x154,
+
+	NOCP_COUNTERS_2_SRC		= 0x160,
+	NOCP_COUNTERS_2_ALARM_MODE	= 0x164,
+	NOCP_COUNTERS_2_VAL		= 0x168,
+
+	NOCP_COUNTERS_3_SRC		= 0x174,
+	NOCP_COUNTERS_3_ALARM_MODE	= 0x178,
+	NOCP_COUNTERS_3_VAL		= 0x17C,
+};
+
+/* NOCP_MAIN_CTL register */
+#define NOCP_MAIN_CTL_ERREN_MASK		BIT(0)
+#define NOCP_MAIN_CTL_TRACEEN_MASK		BIT(1)
+#define NOCP_MAIN_CTL_PAYLOADEN_MASK		BIT(2)
+#define NOCP_MAIN_CTL_STATEN_MASK		BIT(3)
+#define NOCP_MAIN_CTL_ALARMEN_MASK		BIT(4)
+#define NOCP_MAIN_CTL_STATCONDDUMP_MASK	BIT(5)
+#define NOCP_MAIN_CTL_INTRUSIVEMODE_MASK	BIT(6)
+
+/* NOCP_CFG_CTL register */
+#define NOCP_CFG_CTL_GLOBALEN_MASK		BIT(0)
+#define NOCP_CFG_CTL_ACTIVE_MASK		BIT(1)
+
+/* NOCP_COUNTERS_x_SRC register */
+#define NOCP_CNT_SRC_INTEVENT_SHIFT		0
+#define NOCP_CNT_SRC_INTEVENT_MASK		(0x1F << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_OFF_MASK		(0x0 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CYCLE_MASK	(0x1 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_IDLE_MASK		(0x2 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_XFER_MASK		(0x3 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BUSY_MASK		(0x4 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_WAIT_MASK		(0x5 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_PKT_MASK		(0x6 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BYTE_MASK		(0x8 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CHAIN_MASK	(0x10 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+
+/* NOCP_COUNTERS_x_ALARM_MODE register */
+#define NOCP_CNT_ALARM_MODE_SHIFT		0
+#define NOCP_CNT_ALARM_MODE_MASK		(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_OFF_MASK		(0x0 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MASK		(0x1 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MAX_MASK		(0x2 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MAX_MASK	(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+
+#endif /* __EXYNOS_NOCP_H__ */
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644
index 0000000..a9c64f0
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -0,0 +1,679 @@
+/*
+ * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ *
+ * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/suspend.h>
+#include <linux/devfreq-event.h>
+
+#include "exynos-ppmu.h"
+
+struct exynos_ppmu_data {
+	struct clk *clk;
+};
+
+struct exynos_ppmu {
+	struct devfreq_event_dev **edev;
+	struct devfreq_event_desc *desc;
+	unsigned int num_events;
+
+	struct device *dev;
+	struct regmap *regmap;
+
+	struct exynos_ppmu_data ppmu;
+};
+
+#define PPMU_EVENT(name)			\
+	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
+	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
+	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
+	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
+
+static struct __exynos_ppmu_events {
+	char *name;
+	int id;
+} ppmu_events[] = {
+	/* For Exynos3250, Exynos4 and Exynos5260 */
+	PPMU_EVENT(g3d),
+	PPMU_EVENT(fsys),
+
+	/* For Exynos4 SoCs and Exynos3250 */
+	PPMU_EVENT(dmc0),
+	PPMU_EVENT(dmc1),
+	PPMU_EVENT(cpu),
+	PPMU_EVENT(rightbus),
+	PPMU_EVENT(leftbus),
+	PPMU_EVENT(lcd0),
+	PPMU_EVENT(camif),
+
+	/* Only for Exynos3250 and Exynos5260 */
+	PPMU_EVENT(mfc),
+
+	/* Only for Exynos4 SoCs */
+	PPMU_EVENT(mfc-left),
+	PPMU_EVENT(mfc-right),
+
+	/* Only for Exynos5260 SoCs */
+	PPMU_EVENT(drex0-s0),
+	PPMU_EVENT(drex0-s1),
+	PPMU_EVENT(drex1-s0),
+	PPMU_EVENT(drex1-s1),
+	PPMU_EVENT(eagle),
+	PPMU_EVENT(kfc),
+	PPMU_EVENT(isp),
+	PPMU_EVENT(fimc),
+	PPMU_EVENT(gscl),
+	PPMU_EVENT(mscl),
+	PPMU_EVENT(fimd0x),
+	PPMU_EVENT(fimd1x),
+
+	/* Only for Exynos5433 SoCs */
+	PPMU_EVENT(d0-cpu),
+	PPMU_EVENT(d0-general),
+	PPMU_EVENT(d0-rt),
+	PPMU_EVENT(d1-cpu),
+	PPMU_EVENT(d1-general),
+	PPMU_EVENT(d1-rt),
+};
+
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
+		if (!strcmp(edev->desc->name, ppmu_events[i].name))
+			return ppmu_events[i].id;
+
+	return -EINVAL;
+}
+
+/*
+ * The devfreq-event ops structure for PPMU v1.1
+ */
+static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int ret;
+	u32 pmnc;
+
+	/* Disable all counters */
+	ret = regmap_write(info->regmap, PPMU_CNTENC,
+				PPMU_CCNT_MASK |
+				PPMU_PMCNT0_MASK |
+				PPMU_PMCNT1_MASK |
+				PPMU_PMCNT2_MASK |
+				PPMU_PMCNT3_MASK);
+	if (ret < 0)
+		return ret;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+	u32 pmnc, cntens;
+
+	if (id < 0)
+		return id;
+
+	/* Enable specific counter */
+	ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
+	if (ret < 0)
+		return ret;
+
+	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
+	if (ret < 0)
+		return ret;
+
+	/* Set the event of Read/Write data count */
+	ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
+				PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT);
+	if (ret < 0)
+		return ret;
+
+	/* Reset cycle counter/performance counter and enable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+			| PPMU_PMNC_COUNTER_RESET_MASK
+			| PPMU_PMNC_CC_RESET_MASK);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
+				struct devfreq_event_data *edata)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	unsigned int total_count, load_count;
+	unsigned int pmcnt3_high, pmcnt3_low;
+	unsigned int pmnc, cntenc;
+	int ret;
+
+	if (id < 0)
+		return -EINVAL;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	/* Read cycle count */
+	ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
+	if (ret < 0)
+		return ret;
+	edata->total_count = total_count;
+
+	/* Read performance count */
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
+		if (ret < 0)
+			return ret;
+		edata->load_count = load_count;
+		break;
+	case PPMU_PMNCNT3:
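+		/*
+		 * PMCNT3 is split across the HIGH/LOW registers;
+		 * combine both halves below.
+		 */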
+		ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
+		if (ret < 0)
+			return ret;
+
+		ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
+		if (ret < 0)
+			return ret;
+
+		edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Disable specific counter */
+	ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
+	if (ret < 0)
+		return ret;
+
+	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+
+	return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_ops = {
+	.disable = exynos_ppmu_disable,
+	.set_event = exynos_ppmu_set_event,
+	.get_event = exynos_ppmu_get_event,
+};
+
+/*
+ * The devfreq-event ops structure for PPMU v2.0
+ */
+static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int ret;
+	u32 pmnc, clear;
+
+	/* Disable all counters */
+	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
+		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+	ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
+	if (ret < 0)
+		return ret;
+
+	ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
+	if (ret < 0)
+		return ret;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	unsigned int pmnc, cntens;
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+
+	if (id < 0)
+		return id;
+
+	/* Enable specific counter */
+	ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
+	if (ret < 0)
+		return ret;
+
+	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
+	if (ret < 0)
+		return ret;
+
+	/* Set the event of Read/Write data count */
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+				PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT);
+		if (ret < 0)
+			return ret;
+		break;
+	case PPMU_PMNCNT3:
+		ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+				PPMU_V2_EVT3_RW_DATA_CNT);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	/* Reset cycle counter/performance counter and enable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+			| PPMU_PMNC_COUNTER_RESET_MASK
+			| PPMU_PMNC_CC_RESET_MASK
+			| PPMU_PMNC_CC_DIVIDER_MASK
+			| PPMU_V2_PMNC_START_MODE_MASK);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
+
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
+				    struct devfreq_event_data *edata)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	int ret;
+	unsigned int pmnc, cntenc;
+	unsigned int pmcnt_high, pmcnt_low;
+	unsigned int total_count, count;
+	unsigned long load_count = 0;
+
+	if (id < 0)
+		return -EINVAL;
+
+	/* Disable PPMU */
+	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
+	if (ret < 0)
+		return ret;
+
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
+	if (ret < 0)
+		return ret;
+
+	/* Read cycle count and performance count */
+	ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
+	if (ret < 0)
+		return ret;
+	edata->total_count = total_count;
+
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
+		if (ret < 0)
+			return ret;
+		load_count = count;
+		break;
+	case PPMU_PMNCNT3:
+		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
+						&pmcnt_high);
+		if (ret < 0)
+			return ret;
+
+		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
+		if (ret < 0)
+			return ret;
+
+		load_count = ((u64)(pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+		break;
+	}
+	edata->load_count = load_count;
+
+	/* Disable specific counter */
+	ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
+	if (ret < 0)
+		return ret;
+
+	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+	return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
+	.disable = exynos_ppmu_v2_disable,
+	.set_event = exynos_ppmu_v2_set_event,
+	.get_event = exynos_ppmu_v2_get_event,
+};
+
+static const struct of_device_id exynos_ppmu_id_match[] = {
+	{
+		.compatible = "samsung,exynos-ppmu",
+		.data = (void *)&exynos_ppmu_ops,
+	}, {
+		.compatible = "samsung,exynos-ppmu-v2",
+		.data = (void *)&exynos_ppmu_v2_ops,
+	},
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
+
+static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
+{
+	const struct of_device_id *match;
+
+	match = of_match_node(exynos_ppmu_id_match, np);
+	return (struct devfreq_event_ops *)match->data;
+}
+
+static int of_get_devfreq_events(struct device_node *np,
+				 struct exynos_ppmu *info)
+{
+	struct devfreq_event_desc *desc;
+	struct devfreq_event_ops *event_ops;
+	struct device *dev = info->dev;
+	struct device_node *events_np, *node;
+	int i, j, count;
+
+	events_np = of_get_child_by_name(np, "events");
+	if (!events_np) {
+		dev_err(dev,
+			"failed to get child node of devfreq-event devices\n");
+		return -EINVAL;
+	}
+	event_ops = exynos_bus_get_ops(np);
+
+	count = of_get_child_count(events_np);
+	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		of_node_put(events_np);
+		return -ENOMEM;
+	}
+	info->num_events = count;
+
+	j = 0;
+	for_each_child_of_node(events_np, node) {
+		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
+			if (!ppmu_events[i].name)
+				continue;
+
+			if (!of_node_cmp(node->name, ppmu_events[i].name))
+				break;
+		}
+
+		if (i == ARRAY_SIZE(ppmu_events)) {
+			dev_warn(dev,
+				"don't know how to configure events : %s\n",
+				node->name);
+			continue;
+		}
+
+		desc[j].ops = event_ops;
+		desc[j].driver_data = info;
+
+		of_property_read_string(node, "event-name", &desc[j].name);
+
+		j++;
+	}
+	info->desc = desc;
+
+	of_node_put(events_np);
+
+	return 0;
+}
+
+static struct regmap_config exynos_ppmu_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
+static int exynos_ppmu_parse_dt(struct platform_device *pdev,
+				struct exynos_ppmu *info)
+{
+	struct device *dev = info->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+	int ret = 0;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	/* Map the memory-mapped I/O used to control the PPMU registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
+	info->regmap = devm_regmap_init_mmio(dev, base,
+					&exynos_ppmu_regmap_config);
+	if (IS_ERR(info->regmap)) {
+		dev_err(dev, "failed to initialize regmap\n");
+		return PTR_ERR(info->regmap);
+	}
+
+	info->ppmu.clk = devm_clk_get(dev, "ppmu");
+	if (IS_ERR(info->ppmu.clk)) {
+		info->ppmu.clk = NULL;
+		dev_warn(dev, "cannot get PPMU clock\n");
+	}
+
+	ret = of_get_devfreq_events(np, info);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse exynos ppmu dt node\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int exynos_ppmu_probe(struct platform_device *pdev)
+{
+	struct exynos_ppmu *info;
+	struct devfreq_event_dev **edev;
+	struct devfreq_event_desc *desc;
+	int i, ret = 0, size;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = &pdev->dev;
+
+	/* Parse dt data to get resource */
+	ret = exynos_ppmu_parse_dt(pdev, info);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"failed to parse devicetree for resource\n");
+		return ret;
+	}
+	desc = info->desc;
+
+	size = sizeof(struct devfreq_event_dev *) * info->num_events;
+	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!info->edev)
+		return -ENOMEM;
+
+	edev = info->edev;
+	platform_set_drvdata(pdev, info);
+
+	for (i = 0; i < info->num_events; i++) {
+		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
+		if (IS_ERR(edev[i])) {
+			ret = PTR_ERR(edev[i]);
+			dev_err(&pdev->dev,
+				"failed to add devfreq-event device\n");
+			return PTR_ERR(edev[i]);
+		}
+
+		pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
+			dev_name(&pdev->dev), desc[i].name);
+	}
+
+	ret = clk_prepare_enable(info->ppmu.clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int exynos_ppmu_remove(struct platform_device *pdev)
+{
+	struct exynos_ppmu *info = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(info->ppmu.clk);
+
+	return 0;
+}
+
+static struct platform_driver exynos_ppmu_driver = {
+	.probe	= exynos_ppmu_probe,
+	.remove	= exynos_ppmu_remove,
+	.driver = {
+		.name	= "exynos-ppmu",
+		.of_match_table = exynos_ppmu_id_match,
+	},
+};
+module_platform_driver(exynos_ppmu_driver);
+
+MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644
index 0000000..05774c4
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -0,0 +1,163 @@
+/*
+ * exynos_ppmu.h - EXYNOS PPMU header file
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PPMU_H__
+#define __EXYNOS_PPMU_H__
+
+enum ppmu_state {
+	PPMU_DISABLE = 0,
+	PPMU_ENABLE,
+};
+
+enum ppmu_counter {
+	PPMU_PMNCNT0 = 0,
+	PPMU_PMNCNT1,
+	PPMU_PMNCNT2,
+	PPMU_PMNCNT3,
+
+	PPMU_PMNCNT_MAX,
+};
+
+/***
+ * PPMUv1.1 Definitions
+ */
+enum ppmu_event_type {
+	PPMU_RO_BUSY_CYCLE_CNT	= 0x0,
+	PPMU_WO_BUSY_CYCLE_CNT	= 0x1,
+	PPMU_RW_BUSY_CYCLE_CNT	= 0x2,
+	PPMU_RO_REQUEST_CNT	= 0x3,
+	PPMU_WO_REQUEST_CNT	= 0x4,
+	PPMU_RO_DATA_CNT	= 0x5,
+	PPMU_WO_DATA_CNT	= 0x6,
+	PPMU_RO_LATENCY		= 0x12,
+	PPMU_WO_LATENCY		= 0x16,
+};
+
+enum ppmu_reg {
+	/* PPC control register */
+	PPMU_PMNC		= 0x00,
+	PPMU_CNTENS		= 0x10,
+	PPMU_CNTENC		= 0x20,
+	PPMU_INTENS		= 0x30,
+	PPMU_INTENC		= 0x40,
+	PPMU_FLAG		= 0x50,
+
+	/* Cycle Counter and Performance Event Counter Register */
+	PPMU_CCNT		= 0x100,
+	PPMU_PMCNT0		= 0x110,
+	PPMU_PMCNT1		= 0x120,
+	PPMU_PMCNT2		= 0x130,
+	PPMU_PMCNT3_HIGH	= 0x140,
+	PPMU_PMCNT3_LOW		= 0x150,
+
+	/* Bus Event Generator */
+	PPMU_BEVT0SEL		= 0x1000,
+	PPMU_BEVT1SEL		= 0x1100,
+	PPMU_BEVT2SEL		= 0x1200,
+	PPMU_BEVT3SEL		= 0x1300,
+	PPMU_COUNTER_RESET	= 0x1810,
+	PPMU_READ_OVERFLOW_CNT	= 0x1810,
+	PPMU_READ_UNDERFLOW_CNT	= 0x1814,
+	PPMU_WRITE_OVERFLOW_CNT	= 0x1850,
+	PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
+	PPMU_READ_PENDING_CNT	= 0x1880,
+	PPMU_WRITE_PENDING_CNT	= 0x1884
+};
+
+/* PMNC register */
+#define PPMU_PMNC_CC_RESET_SHIFT	2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT	1
+#define PPMU_PMNC_ENABLE_SHIFT		0
+#define PPMU_PMNC_START_MODE_MASK	BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK	BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK		BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK	BIT(1)
+#define PPMU_PMNC_ENABLE_MASK		BIT(0)
+
+/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
+#define PPMU_CCNT_MASK			BIT(31)
+#define PPMU_PMCNT3_MASK		BIT(3)
+#define PPMU_PMCNT2_MASK		BIT(2)
+#define PPMU_PMCNT1_MASK		BIT(1)
+#define PPMU_PMCNT0_MASK		BIT(0)
+
+/* PPMU_PMNCTx/PPMU_BEVTxSEL registers */
+#define PPMU_PMNCT(x)			(PPMU_PMCNT0 + (0x10 * x))
+#define PPMU_BEVTxSEL(x)		(PPMU_BEVT0SEL + (0x100 * x))
+
+/***
+ * PPMU_V2.0 definitions
+ */
+enum ppmu_v2_mode {
+	PPMU_V2_MODE_MANUAL = 0,
+	PPMU_V2_MODE_AUTO = 1,
+	PPMU_V2_MODE_CIG = 2,	/* CIG (Conditional Interrupt Generation) */
+};
+
+enum ppmu_v2_event_type {
+	PPMU_V2_RO_DATA_CNT	= 0x4,
+	PPMU_V2_WO_DATA_CNT	= 0x5,
+
+	PPMU_V2_EVT3_RW_DATA_CNT = 0x22,	/* Only for Event3 */
+};
+
+enum ppmu_v2_reg {
+	/* PPC control register */
+	PPMU_V2_PMNC		= 0x04,
+	PPMU_V2_CNTENS		= 0x08,
+	PPMU_V2_CNTENC		= 0x0c,
+	PPMU_V2_INTENS		= 0x10,
+	PPMU_V2_INTENC		= 0x14,
+	PPMU_V2_FLAG		= 0x18,
+
+	/* Cycle Counter and Performance Event Counter Register */
+	PPMU_V2_CCNT		= 0x48,
+	PPMU_V2_PMCNT0		= 0x34,
+	PPMU_V2_PMCNT1		= 0x38,
+	PPMU_V2_PMCNT2		= 0x3c,
+	PPMU_V2_PMCNT3_LOW	= 0x40,
+	PPMU_V2_PMCNT3_HIGH	= 0x44,
+
+	/* Bus Event Generator */
+	PPMU_V2_CIG_CFG0		= 0x1c,
+	PPMU_V2_CIG_CFG1		= 0x20,
+	PPMU_V2_CIG_CFG2		= 0x24,
+	PPMU_V2_CIG_RESULT	= 0x28,
+	PPMU_V2_CNT_RESET	= 0x2c,
+	PPMU_V2_CNT_AUTO		= 0x30,
+	PPMU_V2_CH_EV0_TYPE	= 0x200,
+	PPMU_V2_CH_EV1_TYPE	= 0x204,
+	PPMU_V2_CH_EV2_TYPE	= 0x208,
+	PPMU_V2_CH_EV3_TYPE	= 0x20c,
+	PPMU_V2_SM_ID_V		= 0x220,
+	PPMU_V2_SM_ID_A		= 0x224,
+	PPMU_V2_SM_OTHERS_V	= 0x228,
+	PPMU_V2_SM_OTHERS_A	= 0x22c,
+	PPMU_V2_INTERRUPT_RESET	= 0x260,
+};
+
+/* PMNC register */
+#define PPMU_V2_PMNC_START_MODE_SHIFT	20
+#define PPMU_V2_PMNC_START_MODE_MASK	(0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
+
+#define PPMU_PMNC_CC_RESET_SHIFT	2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT	1
+#define PPMU_PMNC_ENABLE_SHIFT		0
+#define PPMU_PMNC_START_MODE_MASK	BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK	BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK		BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK	BIT(1)
+#define PPMU_PMNC_ENABLE_MASK		BIT(0)
+
+#define PPMU_V2_PMNCT(x)		(PPMU_V2_PMCNT0 + (0x4 * x))
+#define PPMU_V2_CH_EVx_TYPE(x)		(PPMU_V2_CH_EV0_TYPE + (0x4 * x))
+
+#endif /* __EXYNOS_PPMU_H__ */
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
new file mode 100644
index 0000000..22b1133
--- /dev/null
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+#define RK3399_DMC_NUM_CH	2
+
+/* DDRMON_CTRL */
+#define DDRMON_CTRL	0x04
+#define CLR_DDRMON_CTRL	(0x1f0000 << 0)
+#define LPDDR4_EN	(0x10001 << 4)
+#define HARDWARE_EN	(0x10001 << 3)
+#define LPDDR3_EN	(0x10001 << 2)
+#define SOFTWARE_EN	(0x10001 << 1)
+#define SOFTWARE_DIS	(0x10000 << 1)
+#define TIME_CNT_EN	(0x10001 << 0)
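+
+/*
+ * Note: the DDRMON_CTRL values above follow the usual Rockchip convention
+ * where the upper 16 bits of a write act as a write-enable mask for the
+ * corresponding lower 16 bits (e.g. SOFTWARE_EN sets bit 1 while
+ * SOFTWARE_DIS clears it).
+ */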
+
+#define DDRMON_CH0_COUNT_NUM		0x28
+#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
+#define DDRMON_CH1_COUNT_NUM		0x3c
+#define DDRMON_CH1_DFI_ACCESS_NUM	0x40
+
+/* pmu grf */
+#define PMUGRF_OS_REG2	0x308
+#define DDRTYPE_SHIFT	13
+#define DDRTYPE_MASK	7
+
+enum {
+	DDR3 = 3,
+	LPDDR3 = 6,
+	LPDDR4 = 7,
+	UNUSED = 0xFF
+};
+
+struct dmc_usage {
+	u32 access;
+	u32 total;
+};
+
+/*
+ * The dfi controller can monitor DDR load. It has an upper and lower threshold
+ * for the operating points. Whenever the usage leaves these bounds an event is
+ * generated to indicate the DDR frequency should be changed.
+ */
+struct rockchip_dfi {
+	struct devfreq_event_dev *edev;
+	struct devfreq_event_desc *desc;
+	struct dmc_usage ch_usage[RK3399_DMC_NUM_CH];
+	struct device *dev;
+	void __iomem *regs;
+	struct regmap *regmap_pmu;
+	struct clk *clk;
+};
+
+static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	void __iomem *dfi_regs = info->regs;
+	u32 val;
+	u32 ddr_type;
+
+	/* get ddr type */
+	regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val);
+	ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK;
+
+	/* clear DDRMON_CTRL setting */
+	writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
+
+	/* set ddr type to dfi */
+	if (ddr_type == LPDDR3)
+		writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
+	else if (ddr_type == LPDDR4)
+		writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
+
+	/* enable count, use software mode */
+	writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL);
+}
+
+static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	void __iomem *dfi_regs = info->regs;
+
+	writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL);
+}
+
+static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	u32 tmp, max = 0;
+	u32 i, busier_ch = 0;
+	void __iomem *dfi_regs = info->regs;
+
+	rockchip_dfi_stop_hardware_counter(edev);
+
+	/* Find out which channel is busier */
+	for (i = 0; i < RK3399_DMC_NUM_CH; i++) {
+		info->ch_usage[i].access = readl_relaxed(dfi_regs +
+				DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4;
+		info->ch_usage[i].total = readl_relaxed(dfi_regs +
+				DDRMON_CH0_COUNT_NUM + i * 20);
+		tmp = info->ch_usage[i].access;
+		if (tmp > max) {
+			busier_ch = i;
+			max = tmp;
+		}
+	}
+	rockchip_dfi_start_hardware_counter(edev);
+
+	return busier_ch;
+}
+
+static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+
+	rockchip_dfi_stop_hardware_counter(edev);
+	clk_disable_unprepare(info->clk);
+
+	return 0;
+}
+
+static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	int ret;
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret) {
+		dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
+		return ret;
+	}
+
+	rockchip_dfi_start_hardware_counter(edev);
+	return 0;
+}
+
+static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
+{
+	return 0;
+}
+
+static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
+				  struct devfreq_event_data *edata)
+{
+	struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
+	int busier_ch;
+
+	busier_ch = rockchip_dfi_get_busier_ch(edev);
+
+	edata->load_count = info->ch_usage[busier_ch].access;
+	edata->total_count = info->ch_usage[busier_ch].total;
+
+	return 0;
+}
+
+static const struct devfreq_event_ops rockchip_dfi_ops = {
+	.disable = rockchip_dfi_disable,
+	.enable = rockchip_dfi_enable,
+	.get_event = rockchip_dfi_get_event,
+	.set_event = rockchip_dfi_set_event,
+};
+
+static const struct of_device_id rockchip_dfi_id_match[] = {
+	{ .compatible = "rockchip,rk3399-dfi" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
+
+static int rockchip_dfi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_dfi *data;
+	struct resource *res;
+	struct devfreq_event_desc *desc;
+	struct device_node *np = pdev->dev.of_node, *node;
+
+	data = devm_kzalloc(dev, sizeof(struct rockchip_dfi), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->regs))
+		return PTR_ERR(data->regs);
+
+	data->clk = devm_clk_get(dev, "pclk_ddr_mon");
+	if (IS_ERR(data->clk)) {
+		dev_err(dev, "Cannot get the pclk_ddr_mon clock\n");
+		return PTR_ERR(data->clk);
+	}
+
+	/* try to find the optional reference to the pmu syscon */
+	node = of_parse_phandle(np, "rockchip,pmu", 0);
+	if (node) {
+		data->regmap_pmu = syscon_node_to_regmap(node);
+		if (IS_ERR(data->regmap_pmu))
+			return PTR_ERR(data->regmap_pmu);
+	}
+	data->dev = dev;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->ops = &rockchip_dfi_ops;
+	desc->driver_data = data;
+	desc->name = np->name;
+	data->desc = desc;
+
+	data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
+	if (IS_ERR(data->edev)) {
+		dev_err(&pdev->dev,
+			"failed to add devfreq-event device\n");
+		return PTR_ERR(data->edev);
+	}
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+}
+
+static struct platform_driver rockchip_dfi_driver = {
+	.probe	= rockchip_dfi_probe,
+	.driver = {
+		.name	= "rockchip-dfi",
+		.of_match_table = rockchip_dfi_id_match,
+	},
+};
+module_platform_driver(rockchip_dfi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip DFI driver");
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
new file mode 100644
index 0000000..c25658b
--- /dev/null
+++ b/drivers/devfreq/exynos-bus.c
@@ -0,0 +1,569 @@
+/*
+ * Generic Exynos Bus frequency driver with DEVFREQ Framework
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This driver support Exynos Bus frequency feature by using
+ * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define DEFAULT_SATURATION_RATIO	40
+#define DEFAULT_VOLTAGE_TOLERANCE	2
+
+struct exynos_bus {
+	struct device *dev;
+
+	struct devfreq *devfreq;
+	struct devfreq_event_dev **edev;
+	unsigned int edev_count;
+	struct mutex lock;
+
+	unsigned long curr_freq;
+
+	struct regulator *regulator;
+	struct clk *clk;
+	unsigned int voltage_tolerance;
+	unsigned int ratio;
+};
+
+/*
+ * Control the devfreq-event device to get the current state of bus
+ */
+#define exynos_bus_ops_edev(ops)				\
+static int exynos_bus_##ops(struct exynos_bus *bus)		\
+{								\
+	int i, ret;						\
+								\
+	for (i = 0; i < bus->edev_count; i++) {			\
+		if (!bus->edev[i])				\
+			continue;				\
+		ret = devfreq_event_##ops(bus->edev[i]);	\
+		if (ret < 0)					\
+			return ret;				\
+	}							\
+								\
+	return 0;						\
+}
+exynos_bus_ops_edev(enable_edev);
+exynos_bus_ops_edev(disable_edev);
+exynos_bus_ops_edev(set_event);
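+
+/*
+ * The macro above generates exynos_bus_enable_edev(), exynos_bus_disable_edev()
+ * and exynos_bus_set_event(), each forwarding to the corresponding
+ * devfreq_event_*() helper for every devfreq-event device of the bus.
+ */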
+
+static int exynos_bus_get_event(struct exynos_bus *bus,
+				struct devfreq_event_data *edata)
+{
+	struct devfreq_event_data event_data;
+	unsigned long load_count = 0, total_count = 0;
+	int i, ret = 0;
+
+	for (i = 0; i < bus->edev_count; i++) {
+		if (!bus->edev[i])
+			continue;
+
+		ret = devfreq_event_get_event(bus->edev[i], &event_data);
+		if (ret < 0)
+			return ret;
+
+		if (i == 0 || event_data.load_count > load_count) {
+			load_count = event_data.load_count;
+			total_count = event_data.total_count;
+		}
+	}
+
+	edata->load_count = load_count;
+	edata->total_count = total_count;
+
+	return ret;
+}
+
+/*
+ * Mandatory callback for the devfreq simple-ondemand governor
+ */
+static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq, new_volt, tol;
+	int ret = 0;
+
+	/* Get the new OPP instance according to the new bus clock */
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	new_volt = dev_pm_opp_get_voltage(new_opp);
+	dev_pm_opp_put(new_opp);
+
+	old_freq = bus->curr_freq;
+
+	if (old_freq == new_freq)
+		return 0;
+	tol = new_volt * bus->voltage_tolerance / 100;
+
+	/* Change voltage and frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	if (old_freq < new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to change clock of bus\n");
+		clk_set_rate(bus->clk, old_freq);
+		goto out;
+	}
+
+	if (old_freq > new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+	bus->curr_freq = new_freq;
+
+	dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+			old_freq, new_freq, clk_get_rate(bus->clk));
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+static int exynos_bus_get_dev_status(struct device *dev,
+				     struct devfreq_dev_status *stat)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct devfreq_event_data edata;
+	int ret;
+
+	stat->current_frequency = bus->curr_freq;
+
+	ret = exynos_bus_get_event(bus, &edata);
+	if (ret < 0) {
+		stat->total_time = stat->busy_time = 0;
+		goto err;
+	}
+
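+	/*
+	 * Scale the measured load by the saturation ratio; with the default
+	 * ratio of 40, a load_count of 40 is reported as a busy_time of 100.
+	 */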
+	stat->busy_time = (edata.load_count * 100) / bus->ratio;
+	stat->total_time = edata.total_count;
+
+	dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
+							stat->total_time);
+
+err:
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static void exynos_bus_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0)
+		dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+	if (bus->regulator)
+		regulator_disable(bus->regulator);
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+}
+
+/*
+ * Mandatory callback for the devfreq passive governor
+ */
+static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
+					u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq;
+	int ret = 0;
+
+	/* Get the new OPP instance according to the new bus clock */
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	dev_pm_opp_put(new_opp);
+
+	old_freq = bus->curr_freq;
+
+	if (old_freq == new_freq)
+		return 0;
+
+	/* Change the frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to set the clock of bus\n");
+		goto out;
+	}
+
+	*freq = new_freq;
+	bus->curr_freq = new_freq;
+
+	dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
+			old_freq, new_freq, clk_get_rate(bus->clk));
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+static void exynos_bus_passive_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+}
+
+static int exynos_bus_parent_parse_of(struct device_node *np,
+					struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	int i, ret, count, size;
+
+	/* Get the regulator that supplies power to the bus */
+	bus->regulator = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(bus->regulator)) {
+		dev_err(dev, "failed to get VDD regulator\n");
+		return PTR_ERR(bus->regulator);
+	}
+
+	ret = regulator_enable(bus->regulator);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable VDD regulator\n");
+		return ret;
+	}
+
+	/*
+	 * Get the devfreq-event devices that report the current utilization
+	 * of the bus. This raw data is used by the devfreq simple-ondemand
+	 * governor.
+	 */
+	count = devfreq_event_get_edev_count(dev);
+	if (count < 0) {
+		dev_err(dev, "failed to get the count of devfreq-event dev\n");
+		ret = count;
+		goto err_regulator;
+	}
+	bus->edev_count = count;
+
+	size = sizeof(*bus->edev) * count;
+	bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!bus->edev) {
+		ret = -ENOMEM;
+		goto err_regulator;
+	}
+
+	for (i = 0; i < count; i++) {
+		bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+		if (IS_ERR(bus->edev[i])) {
+			ret = -EPROBE_DEFER;
+			goto err_regulator;
+		}
+	}
+
+	/*
+	 * Optionally, get the saturation ratio for the given Exynos SoC.
+	 * When measuring the utilization of each AXI bus with devfreq-event
+	 * devices, the measured busy cycles might be much lower than the
+	 * total cycles of the bus during the sampling period. As a result,
+	 * the devfreq simple-ondemand governor might not change the current
+	 * frequency because the utilization (= busy cycles / total cycles)
+	 * is too low. This property is therefore used to scale the
+	 * utilization when calculating busy_time in
+	 * exynos_bus_get_dev_status().
+	 */
+	if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
+		bus->ratio = DEFAULT_SATURATION_RATIO;
+
+	if (of_property_read_u32(np, "exynos,voltage-tolerance",
+					&bus->voltage_tolerance))
+		bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
+
+	return 0;
+
+err_regulator:
+	regulator_disable(bus->regulator);
+
+	return ret;
+}
+
+static int exynos_bus_parse_of(struct device_node *np,
+			      struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	struct dev_pm_opp *opp;
+	unsigned long rate;
+	int ret;
+
+	/* Get the source clock of the bus */
+	bus->clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(bus->clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(bus->clk);
+	}
+
+	ret = clk_prepare_enable(bus->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable bus clock\n");
+		return ret;
+	}
+
+	/* Get the freq and voltage from OPP table to scale the bus freq */
+	ret = dev_pm_opp_of_add_table(dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to get OPP table\n");
+		goto err_clk;
+	}
+
+	rate = clk_get_rate(bus->clk);
+
+	opp = devfreq_recommended_opp(dev, &rate, 0);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "failed to find dev_pm_opp\n");
+		ret = PTR_ERR(opp);
+		goto err_opp;
+	}
+	bus->curr_freq = dev_pm_opp_get_freq(opp);
+	dev_pm_opp_put(opp);
+
+	return 0;
+
+err_opp:
+	dev_pm_opp_of_remove_table(dev);
+err_clk:
+	clk_disable_unprepare(bus->clk);
+
+	return ret;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node, *node;
+	struct devfreq_dev_profile *profile;
+	struct devfreq_simple_ondemand_data *ondemand_data;
+	struct devfreq_passive_data *passive_data;
+	struct devfreq *parent_devfreq;
+	struct exynos_bus *bus;
+	int ret, max_state;
+	unsigned long min_freq, max_freq;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		return -ENOMEM;
+	mutex_init(&bus->lock);
+	bus->dev = &pdev->dev;
+	platform_set_drvdata(pdev, bus);
+
+	/* Parse the device-tree to get the resource information */
+	ret = exynos_bus_parse_of(np, bus);
+	if (ret < 0)
+		return ret;
+
+	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+	if (!profile) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
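+	/*
+	 * A "devfreq" phandle selects the passive path below (this bus
+	 * follows its parent devfreq device); otherwise this node is a
+	 * parent bus using the simple-ondemand governor.
+	 */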
+	node = of_parse_phandle(dev->of_node, "devfreq", 0);
+	if (node) {
+		of_node_put(node);
+		goto passive;
+	} else {
+		ret = exynos_bus_parent_parse_of(np, bus);
+	}
+
+	if (ret < 0)
+		goto err;
+
+	/* Initialize the struct profile and governor data for parent device */
+	profile->polling_ms = 50;
+	profile->target = exynos_bus_target;
+	profile->get_dev_status = exynos_bus_get_dev_status;
+	profile->exit = exynos_bus_exit;
+
+	ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
+	if (!ondemand_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	ondemand_data->upthreshold = 40;
+	ondemand_data->downdifferential = 5;
+
+	/* Add devfreq device to monitor and handle the exynos bus */
+	bus->devfreq = devm_devfreq_add_device(dev, profile,
+						DEVFREQ_GOV_SIMPLE_ONDEMAND,
+						ondemand_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev, "failed to add devfreq device\n");
+		ret = PTR_ERR(bus->devfreq);
+		goto err;
+	}
+
+	/* Register an OPP notifier to catch OPP changes */
+	ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to register opp notifier\n");
+		goto err;
+	}
+
+	/*
+	 * Enable devfreq-event to get raw data which is used to determine
+	 * current bus load.
+	 */
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable devfreq-event devices\n");
+		goto err;
+	}
+
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		goto err;
+	}
+
+	goto out;
+passive:
+	/* Initialize the struct profile and governor data for passive device */
+	profile->target = exynos_bus_passive_target;
+	profile->exit = exynos_bus_passive_exit;
+
+	/* Get the instance of parent devfreq device */
+	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+	if (IS_ERR(parent_devfreq)) {
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+
+	passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+	if (!passive_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	passive_data->parent = parent_devfreq;
+
+	/* Add devfreq device for exynos bus with passive governor */
+	bus->devfreq = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_PASSIVE,
+						passive_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev,
+			"failed to add devfreq dev with passive governor\n");
+		ret = PTR_ERR(bus->devfreq);
+		goto err;
+	}
+
+out:
+	max_state = bus->devfreq->profile->max_state;
+	min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
+	max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+	pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
+			dev_name(dev), min_freq, max_freq);
+
+	return 0;
+
+err:
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_bus_resume(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int exynos_bus_suspend(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to disable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_bus_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
+};
+
+static const struct of_device_id exynos_bus_of_match[] = {
+	{ .compatible = "samsung,exynos-bus", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
+
+static struct platform_driver exynos_bus_platdrv = {
+	.probe		= exynos_bus_probe,
+	.driver = {
+		.name	= "exynos-bus",
+		.pm	= &exynos_bus_pm,
+		.of_match_table = of_match_ptr(exynos_bus_of_match),
+	},
+};
+module_platform_driver(exynos_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL v2");
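
For orientation, the parent path in exynos_bus_probe() above reduces to filling a devfreq_dev_profile plus a devfreq_simple_ondemand_data and handing both to devm_devfreq_add_device(). A minimal sketch of that registration follows; the my_bus_* names are hypothetical placeholders, not part of this driver, and the callbacks are stubs.

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

static int my_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Round *freq through the OPP table and program clock/regulator. */
	return 0;
}

static int my_bus_get_dev_status(struct device *dev,
				 struct devfreq_dev_status *stat)
{
	/* Fill stat->busy_time, stat->total_time, stat->current_frequency. */
	return 0;
}

static int my_bus_register(struct device *dev)
{
	struct devfreq_dev_profile *profile;
	struct devfreq_simple_ondemand_data *tuning;
	struct devfreq *df;

	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
	tuning = devm_kzalloc(dev, sizeof(*tuning), GFP_KERNEL);
	if (!profile || !tuning)
		return -ENOMEM;

	profile->polling_ms = 50;
	profile->target = my_bus_target;
	profile->get_dev_status = my_bus_get_dev_status;

	/* Same hysteresis as the exynos parent path: 40% up, 5% down. */
	tuning->upthreshold = 40;
	tuning->downdifferential = 5;

	df = devm_devfreq_add_device(dev, profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND, tuning);
	return PTR_ERR_OR_ZERO(df);
}
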
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
new file mode 100644
index 0000000..cfc50a6
--- /dev/null
+++ b/drivers/devfreq/governor.h
@@ -0,0 +1,76 @@
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This header is for devfreq governors in drivers/devfreq/
+ */
+
+#ifndef _GOVERNOR_H
+#define _GOVERNOR_H
+
+#include <linux/devfreq.h>
+
+#define to_devfreq(DEV)	container_of((DEV), struct devfreq, dev)
+
+/* Devfreq events */
+#define DEVFREQ_GOV_START			0x1
+#define DEVFREQ_GOV_STOP			0x2
+#define DEVFREQ_GOV_INTERVAL			0x3
+#define DEVFREQ_GOV_SUSPEND			0x4
+#define DEVFREQ_GOV_RESUME			0x5
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node:		list node - contains registered devfreq governors
+ * @name:		Governor's name
+ * @immutable:		Immutable flag for governor. If the value is 1,
+ *			this governor can never be changed to another governor.
+ * @get_target_freq:	Returns desired operating frequency for the device.
+ *			Basically, get_target_freq will run
+ *			devfreq_dev_profile.get_dev_status() to get the
+ *			status of the device (load = busy_time / total_time).
+ *			If no_central_polling is set, this callback is called
+ *			only with update_devfreq() notified by OPP.
+ * @event_handler:      Callback for devfreq core framework to notify events
+ *                      to governors. Events include per device governor
+ *                      init and exit, opp changes out of devfreq, suspend
+ *                      and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+	struct list_head node;
+
+	const char name[DEVFREQ_NAME_LEN];
+	const unsigned int immutable;
+	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+	int (*event_handler)(struct devfreq *devfreq,
+				unsigned int event, void *data);
+};
+
+/* Caution: devfreq->lock must be locked before calling update_devfreq */
+extern int update_devfreq(struct devfreq *devfreq);
+
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+					unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
+extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+	return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+#endif /* _GOVERNOR_H */
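
To make the interface above concrete, here is a hedged skeleton of a custom governor; the "demo" name and its trivial policy are invented for illustration. Because governor.h is private to drivers/devfreq/, such a governor has to live in that directory. It supplies get_target_freq(), reacts to the DEVFREQ_GOV_* events, and registers itself with devfreq_add_governor().

#include <linux/devfreq.h>
#include <linux/module.h>
#include "governor.h"

/* Illustrative policy: always ask for the device's previous frequency. */
static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = df->previous_freq;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);	/* start periodic polling */
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}

	return 0;
}

static struct devfreq_governor demo_governor = {
	.name = "demo",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};

static int __init demo_governor_init(void)
{
	return devfreq_add_governor(&demo_governor);
}
subsys_initcall(demo_governor_init);
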
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
new file mode 100644
index 0000000..3bc29ac
--- /dev/null
+++ b/drivers/devfreq/governor_passive.c
@@ -0,0 +1,211 @@
+/*
+ * linux/drivers/devfreq/governor_passive.c
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+					unsigned long *freq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
+	unsigned long child_freq = ULONG_MAX;
+	struct dev_pm_opp *opp;
+	int i, count, ret = 0;
+
+	/*
+	 * If the devfreq device using the passive governor provides its own
+	 * method to determine the next frequency, use the get_target_freq()
+	 * callback of struct devfreq_passive_data.
+	 */
+	if (p_data->get_target_freq) {
+		ret = p_data->get_target_freq(devfreq, freq);
+		goto out;
+	}
+
+	/*
+	 * If both the parent and the passive devfreq device use an OPP
+	 * table, determine the next frequency from the OPP table.
+	 */
+
+	/*
+	 * - The parent devfreq device uses any governor except passive.
+	 * - The passive devfreq device uses the passive governor.
+	 *
+	 * Each devfreq device has its own OPP table. After the governor of
+	 * the parent devfreq device decides a new frequency, the passive
+	 * governor looks up the index of that frequency in the parent's
+	 * OPP table and then uses the same index to pick a suitable new
+	 * frequency for the passive devfreq device.
+	 */
+	if (!devfreq->profile || !devfreq->profile->freq_table
+		|| devfreq->profile->max_state <= 0)
+		return -EINVAL;
+
+	/*
+	 * The passive governor has to look up the correct frequency in the
+	 * parent device's OPP list, because at this point *freq is only a
+	 * provisional value decided by the parent's governor.
+	 */
+	opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
+	if (IS_ERR(opp)) {
+		ret = PTR_ERR(opp);
+		goto out;
+	}
+
+	dev_pm_opp_put(opp);
+
+	/*
+	 * Get the index, in the parent device's OPP table, of the frequency
+	 * decided by the parent's governor.
+	 */
+	for (i = 0; i < parent_devfreq->profile->max_state; i++)
+		if (parent_devfreq->profile->freq_table[i] == *freq)
+			break;
+
+	if (i == parent_devfreq->profile->max_state) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get the suitable frequency by using index of parent device. */
+	if (i < devfreq->profile->max_state) {
+		child_freq = devfreq->profile->freq_table[i];
+	} else {
+		count = devfreq->profile->max_state;
+		child_freq = devfreq->profile->freq_table[count - 1];
+	}
+
+	/* Return the suitable frequency for passive device. */
+	*freq = child_freq;
+
+out:
+	return ret;
+}
+
+static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
+{
+	int ret;
+
+	if (!devfreq->governor)
+		return -EINVAL;
+
+	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
+
+	ret = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (ret < 0)
+		goto out;
+
+	ret = devfreq->profile->target(devfreq->dev.parent, &freq, 0);
+	if (ret < 0)
+		goto out;
+
+	if (devfreq->profile->freq_table
+		&& (devfreq_update_status(devfreq, freq)))
+		dev_err(&devfreq->dev,
+			"Couldn't update frequency transition information.\n");
+
+	devfreq->previous_freq = freq;
+
+out:
+	mutex_unlock(&devfreq->lock);
+
+	return 0;
+}
+
+static int devfreq_passive_notifier_call(struct notifier_block *nb,
+				unsigned long event, void *ptr)
+{
+	struct devfreq_passive_data *data
+			= container_of(nb, struct devfreq_passive_data, nb);
+	struct devfreq *devfreq = (struct devfreq *)data->this;
+	struct devfreq *parent = (struct devfreq *)data->parent;
+	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
+	unsigned long freq = freqs->new;
+
+	switch (event) {
+	case DEVFREQ_PRECHANGE:
+		if (parent->previous_freq > freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	case DEVFREQ_POSTCHANGE:
+		if (parent->previous_freq < freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	struct device *dev = devfreq->dev.parent;
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent = (struct devfreq *)p_data->parent;
+	struct notifier_block *nb = &p_data->nb;
+	int ret = 0;
+
+	if (!parent)
+		return -EPROBE_DEFER;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		if (!p_data->this)
+			p_data->this = devfreq;
+
+		nb->notifier_call = devfreq_passive_notifier_call;
+		ret = devm_devfreq_register_notifier(dev, parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER);
+		break;
+	case DEVFREQ_GOV_STOP:
+		devm_devfreq_unregister_notifier(dev, parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_passive = {
+	.name = DEVFREQ_GOV_PASSIVE,
+	.immutable = 1,
+	.get_target_freq = devfreq_passive_get_target_freq,
+	.event_handler = devfreq_passive_event_handler,
+};
+
+static int __init devfreq_passive_init(void)
+{
+	return devfreq_add_governor(&devfreq_passive);
+}
+subsys_initcall(devfreq_passive_init);
+
+static void __exit devfreq_passive_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_passive);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+}
+module_exit(devfreq_passive_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ Passive governor");
+MODULE_LICENSE("GPL v2");
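
From a driver's point of view, using this governor means filling a struct devfreq_passive_data with the parent devfreq instance and, optionally, a custom get_target_freq() that overrides the index-based OPP mapping above. A minimal sketch, with hypothetical my_child_* names:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

/*
 * Hypothetical override: run the child at half of whatever frequency the
 * parent governor proposed, instead of the default index-based mapping.
 */
static int my_child_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq /= 2;
	return 0;
}

static int my_child_register(struct device *dev,
			     struct devfreq_dev_profile *profile)
{
	struct devfreq_passive_data *pdata;
	struct devfreq *parent, *df;

	/* The first "devfreq" phandle in the DT node names the parent. */
	parent = devfreq_get_devfreq_by_phandle(dev, 0);
	if (IS_ERR(parent))
		return -EPROBE_DEFER;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->parent = parent;
	pdata->get_target_freq = my_child_get_target_freq;	/* optional */

	df = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_PASSIVE, pdata);
	return PTR_ERR_OR_ZERO(df);
}
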
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
new file mode 100644
index 0000000..4d23ecf
--- /dev/null
+++ b/drivers/devfreq/governor_performance.c
@@ -0,0 +1,67 @@
+/*
+ *  linux/drivers/devfreq/governor_performance.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static int devfreq_performance_func(struct devfreq *df,
+				    unsigned long *freq)
+{
+	/*
+	 * target callback should be able to get floor value as
+	 * said in devfreq.h
+	 */
+	if (!df->max_freq)
+		*freq = UINT_MAX;
+	else
+		*freq = df->max_freq;
+	return 0;
+}
+
+static int devfreq_performance_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_performance = {
+	.name = DEVFREQ_GOV_PERFORMANCE,
+	.get_target_freq = devfreq_performance_func,
+	.event_handler = devfreq_performance_handler,
+};
+
+static int __init devfreq_performance_init(void)
+{
+	return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_performance);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
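
The UINT_MAX returned above is not applied literally: the driver's target() callback is expected to round the request onto a real operating point, which for an effectively unbounded request ends up being the highest OPP. A sketch of such a target() callback (my_dev_target is a hypothetical name):

#include <linux/devfreq.h>
#include <linux/pm_opp.h>
#include <linux/err.h>

static int my_dev_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;

	/*
	 * Map the (possibly UINT_MAX) request onto a real OPP; *freq is
	 * updated to the chosen rate.
	 */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	/* Program clocks/regulators for *freq here. */
	return 0;
}
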
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
new file mode 100644
index 0000000..0c42f23
--- /dev/null
+++ b/drivers/devfreq/governor_powersave.c
@@ -0,0 +1,64 @@
+/*
+ *  linux/drivers/devfreq/governor_powersave.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static int devfreq_powersave_func(struct devfreq *df,
+				  unsigned long *freq)
+{
+	/*
+	 * target callback should be able to get ceiling value as
+	 * said in devfreq.h
+	 */
+	*freq = df->min_freq;
+	return 0;
+}
+
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int ret = 0;
+
+	if (event == DEVFREQ_GOV_START) {
+		mutex_lock(&devfreq->lock);
+		ret = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_powersave = {
+	.name = DEVFREQ_GOV_POWERSAVE,
+	.get_target_freq = devfreq_powersave_func,
+	.event_handler = devfreq_powersave_handler,
+};
+
+static int __init devfreq_powersave_init(void)
+{
+	return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_powersave);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
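
Conversely, the powersave governor requests df->min_freq, which is 0 unless a floor has been set, and the request is then rounded up to the lowest real operating point by the driver's target() callback. A back-of-the-envelope illustration of that resolution (plain C, not kernel code; it assumes the usual lower-bound rounding done by devfreq_recommended_opp()):

/* opps[] is sorted ascending; n is the number of entries. */
static unsigned long resolve_powersave_request(unsigned long min_freq,
					       const unsigned long *opps, int n)
{
	unsigned long request = min_freq;	/* 0 when no floor is set */
	int i;

	/* Pick the lowest OPP that still satisfies the request... */
	for (i = 0; i < n; i++)
		if (opps[i] >= request)
			return opps[i];

	/* ...or the highest one if the floor exceeds every OPP. */
	return opps[n - 1];
}
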
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
new file mode 100644
index 0000000..28e0f2d
--- /dev/null
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -0,0 +1,150 @@
+/*
+ *  linux/drivers/devfreq/governor_simpleondemand.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include "governor.h"
+
+/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
+#define DFSO_UPTHRESHOLD	(90)
+#define DFSO_DOWNDIFFERENCTIAL	(5)
+static int devfreq_simple_ondemand_func(struct devfreq *df,
+					unsigned long *freq)
+{
+	int err;
+	struct devfreq_dev_status *stat;
+	unsigned long long a, b;
+	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
+	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+	struct devfreq_simple_ondemand_data *data = df->data;
+	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+
+	err = devfreq_update_stats(df);
+	if (err)
+		return err;
+
+	stat = &df->last_status;
+
+	if (data) {
+		if (data->upthreshold)
+			dfso_upthreshold = data->upthreshold;
+		if (data->downdifferential)
+			dfso_downdifferential = data->downdifferential;
+	}
+	if (dfso_upthreshold > 100 ||
+	    dfso_upthreshold < dfso_downdifferential)
+		return -EINVAL;
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat->total_time == 0) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Prevent overflow */
+	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
+		stat->busy_time >>= 7;
+		stat->total_time >>= 7;
+	}
+
+	/* Set MAX if it's busy enough */
+	if (stat->busy_time * 100 >
+	    stat->total_time * dfso_upthreshold) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Set MAX if we do not know the initial frequency */
+	if (stat->current_frequency == 0) {
+		*freq = max;
+		return 0;
+	}
+
+	/* Keep the current frequency */
+	if (stat->busy_time * 100 >
+	    stat->total_time * (dfso_upthreshold - dfso_downdifferential)) {
+		*freq = stat->current_frequency;
+		return 0;
+	}
+
+	/* Set the desired frequency based on the load */
+	a = stat->busy_time;
+	a *= stat->current_frequency;
+	b = div_u64(a, stat->total_time);
+	b *= 100;
+	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
+	*freq = (unsigned long) b;
+
+	if (df->min_freq && *freq < df->min_freq)
+		*freq = df->min_freq;
+	if (df->max_freq && *freq > df->max_freq)
+		*freq = df->max_freq;
+
+	return 0;
+}
+
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq_monitor_start(devfreq);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		devfreq_monitor_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		devfreq_interval_update(devfreq, (unsigned int *)data);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
+	.name = DEVFREQ_GOV_SIMPLE_ONDEMAND,
+	.get_target_freq = devfreq_simple_ondemand_func,
+	.event_handler = devfreq_simple_ondemand_handler,
+};
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+	return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
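
The frequency calculation above can be read as: if the load (busy_time / total_time) exceeds upthreshold, jump to the maximum; if it is still above (upthreshold - downdifferential), hold the current frequency; otherwise pick the rate at which the measured work would correspond to roughly (upthreshold - downdifferential/2) percent load. For example, with busy = 30, total = 100, a current rate of 400 MHz and the 90/5 defaults, the proposal is 120 MHz * 100 / 88, about 136 MHz, before OPP rounding. A condensed restatement of the same arithmetic (illustration only; the real function also handles max_freq, counter overflow and the total_time == 0 case):

#include <linux/kernel.h>
#include <linux/math64.h>

static unsigned long dfso_propose(unsigned long busy, unsigned long total,
				  unsigned long cur_freq,
				  unsigned int up, unsigned int down)
{
	unsigned long long f;

	if (busy * 100 > total * up)
		return ULONG_MAX;		/* busy enough: go to max */

	if (busy * 100 > total * (up - down))
		return cur_freq;		/* inside the hysteresis band */

	/* Scale so the current work would be ~(up - down/2)% of the rate. */
	f = (unsigned long long)busy * cur_freq;
	f = div_u64(f, total) * 100;
	return (unsigned long)div_u64(f, up - down / 2);
}
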
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
new file mode 100644
index 0000000..080607c
--- /dev/null
+++ b/drivers/devfreq/governor_userspace.c
@@ -0,0 +1,167 @@
+/*
+ *  linux/drivers/devfreq/governor_userspace.c
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct userspace_data {
+	unsigned long user_frequency;
+	bool valid;
+};
+
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+{
+	struct userspace_data *data = df->data;
+
+	if (data->valid) {
+		unsigned long adjusted_freq = data->user_frequency;
+
+		if (df->max_freq && adjusted_freq > df->max_freq)
+			adjusted_freq = df->max_freq;
+
+		if (df->min_freq && adjusted_freq < df->min_freq)
+			adjusted_freq = df->min_freq;
+
+		*freq = adjusted_freq;
+	} else {
+		*freq = df->previous_freq; /* No user freq specified yet */
+	}
+	return 0;
+}
+
+static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	unsigned long wanted;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	sscanf(buf, "%lu", &wanted);
+	data->user_frequency = wanted;
+	data->valid = true;
+	err = update_devfreq(devfreq);
+	if (err == 0)
+		err = count;
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	struct userspace_data *data;
+	int err = 0;
+
+	mutex_lock(&devfreq->lock);
+	data = devfreq->data;
+
+	if (data->valid)
+		err = sprintf(buf, "%lu\n", data->user_frequency);
+	else
+		err = sprintf(buf, "undefined\n");
+	mutex_unlock(&devfreq->lock);
+	return err;
+}
+
+static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
+static struct attribute *dev_entries[] = {
+	&dev_attr_set_freq.attr,
+	NULL,
+};
+static const struct attribute_group dev_attr_group = {
+	.name	= DEVFREQ_GOV_USERSPACE,
+	.attrs	= dev_entries,
+};
+
+static int userspace_init(struct devfreq *devfreq)
+{
+	int err = 0;
+	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
+					      GFP_KERNEL);
+
+	if (!data) {
+		err = -ENOMEM;
+		goto out;
+	}
+	data->valid = false;
+	devfreq->data = data;
+
+	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+out:
+	return err;
+}
+
+static void userspace_exit(struct devfreq *devfreq)
+{
+	/*
+	 * Remove the sysfs entry, unless this is being called after
+	 * device_del(), which should have done this already via kobject_del().
+	 */
+	if (devfreq->dev.kobj.sd)
+		sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+
+	kfree(devfreq->data);
+	devfreq->data = NULL;
+}
+
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+			unsigned int event, void *data)
+{
+	int ret = 0;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		ret = userspace_init(devfreq);
+		break;
+	case DEVFREQ_GOV_STOP:
+		userspace_exit(devfreq);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
+	.name = "userspace",
+	.get_target_freq = devfreq_userspace_func,
+	.event_handler = devfreq_userspace_handler,
+};
+
+static int __init devfreq_userspace_init(void)
+{
+	return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_userspace);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+	return;
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
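
Given the attribute group name above (DEVFREQ_GOV_USERSPACE) and the set_freq attribute, a device using this governor exposes its knob as the userspace/set_freq file under its devfreq sysfs node; writes there are clamped to min_freq/max_freq and applied through the driver's target() callback. Registering a device for it needs no governor-private data (hypothetical my_dev_* naming):

#include <linux/devfreq.h>
#include <linux/err.h>

static int my_dev_use_userspace_governor(struct device *dev,
					 struct devfreq_dev_profile *profile)
{
	struct devfreq *df;

	/* The governor allocates its own struct userspace_data on GOV_START. */
	df = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_USERSPACE, NULL);

	return PTR_ERR_OR_ZERO(df);
}
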
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
new file mode 100644
index 0000000..e795ad2
--- /dev/null
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+
+#include <soc/rockchip/rockchip_sip.h>
+
+struct dram_timing {
+	unsigned int ddr3_speed_bin;
+	unsigned int pd_idle;
+	unsigned int sr_idle;
+	unsigned int sr_mc_gate_idle;
+	unsigned int srpd_lite_idle;
+	unsigned int standby_idle;
+	unsigned int auto_pd_dis_freq;
+	unsigned int dram_dll_dis_freq;
+	unsigned int phy_dll_dis_freq;
+	unsigned int ddr3_odt_dis_freq;
+	unsigned int ddr3_drv;
+	unsigned int ddr3_odt;
+	unsigned int phy_ddr3_ca_drv;
+	unsigned int phy_ddr3_dq_drv;
+	unsigned int phy_ddr3_odt;
+	unsigned int lpddr3_odt_dis_freq;
+	unsigned int lpddr3_drv;
+	unsigned int lpddr3_odt;
+	unsigned int phy_lpddr3_ca_drv;
+	unsigned int phy_lpddr3_dq_drv;
+	unsigned int phy_lpddr3_odt;
+	unsigned int lpddr4_odt_dis_freq;
+	unsigned int lpddr4_drv;
+	unsigned int lpddr4_dq_odt;
+	unsigned int lpddr4_ca_odt;
+	unsigned int phy_lpddr4_ca_drv;
+	unsigned int phy_lpddr4_ck_cs_drv;
+	unsigned int phy_lpddr4_dq_drv;
+	unsigned int phy_lpddr4_odt;
+};
+
+struct rk3399_dmcfreq {
+	struct device *dev;
+	struct devfreq *devfreq;
+	struct devfreq_simple_ondemand_data ondemand_data;
+	struct clk *dmc_clk;
+	struct devfreq_event_dev *edev;
+	struct mutex lock;
+	struct dram_timing timing;
+	struct regulator *vdd_center;
+	unsigned long rate, target_rate;
+	unsigned long volt, target_volt;
+};
+
+static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
+				 u32 flags)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long old_clk_rate = dmcfreq->rate;
+	unsigned long target_volt, target_rate;
+	int err;
+
+	opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+
+	target_rate = dev_pm_opp_get_freq(opp);
+	target_volt = dev_pm_opp_get_voltage(opp);
+	dev_pm_opp_put(opp);
+
+	if (dmcfreq->rate == target_rate)
+		return 0;
+
+	mutex_lock(&dmcfreq->lock);
+
+	/*
+	 * If frequency scaling from low to high, adjust voltage first.
+	 * If frequency scaling from high to low, adjust frequency first.
+	 */
+	if (old_clk_rate < target_rate) {
+		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
+					    target_volt);
+		if (err) {
+			dev_err(dev, "Cannot set voltage %lu uV\n",
+				target_volt);
+			goto out;
+		}
+	}
+
+	err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
+	if (err) {
+		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+			err);
+		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
+				      dmcfreq->volt);
+		goto out;
+	}
+
+	/*
+	 * Check the dpll rate. There are only two possible outcomes:
+	 * 1. DDR frequency scaling failed, in which case we still read
+	 *    the old rate.
+	 * 2. DDR frequency scaling succeeded, so we read the rate we set.
+	 */
+	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
+
+	/* If we got the wrong rate, restore the old voltage. */
+	if (dmcfreq->rate != target_rate) {
+		dev_err(dev, "Got wrong frequency, Request %lu, Current %lu\n",
+			target_rate, dmcfreq->rate);
+		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
+				      dmcfreq->volt);
+		goto out;
+	} else if (old_clk_rate > target_rate)
+		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
+					    target_volt);
+	if (err)
+		dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
+
+	dmcfreq->rate = target_rate;
+	dmcfreq->volt = target_volt;
+
+out:
+	mutex_unlock(&dmcfreq->lock);
+	return err;
+}
+
+static int rk3399_dmcfreq_get_dev_status(struct device *dev,
+					 struct devfreq_dev_status *stat)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	struct devfreq_event_data edata;
+	int ret = 0;
+
+	ret = devfreq_event_get_event(dmcfreq->edev, &edata);
+	if (ret < 0)
+		return ret;
+
+	stat->current_frequency = dmcfreq->rate;
+	stat->busy_time = edata.load_count;
+	stat->total_time = edata.total_count;
+
+	return ret;
+}
+
+static int rk3399_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+
+	*freq = dmcfreq->rate;
+
+	return 0;
+}
+
+static struct devfreq_dev_profile rk3399_devfreq_dmc_profile = {
+	.polling_ms	= 200,
+	.target		= rk3399_dmcfreq_target,
+	.get_dev_status	= rk3399_dmcfreq_get_dev_status,
+	.get_cur_freq	= rk3399_dmcfreq_get_cur_freq,
+};
+
+static __maybe_unused int rk3399_dmcfreq_suspend(struct device *dev)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = devfreq_event_disable_edev(dmcfreq->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to disable the devfreq-event devices\n");
+		return ret;
+	}
+
+	ret = devfreq_suspend_device(dmcfreq->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to suspend the devfreq devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = devfreq_event_enable_edev(dmcfreq->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the devfreq-event devices\n");
+		return ret;
+	}
+
+	ret = devfreq_resume_device(dmcfreq->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to resume the devfreq devices\n");
+		return ret;
+	}
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
+			 rk3399_dmcfreq_resume);
+
+static int of_get_ddr_timings(struct dram_timing *timing,
+			      struct device_node *np)
+{
+	int ret = 0;
+
+	ret = of_property_read_u32(np, "rockchip,ddr3_speed_bin",
+				   &timing->ddr3_speed_bin);
+	ret |= of_property_read_u32(np, "rockchip,pd_idle",
+				    &timing->pd_idle);
+	ret |= of_property_read_u32(np, "rockchip,sr_idle",
+				    &timing->sr_idle);
+	ret |= of_property_read_u32(np, "rockchip,sr_mc_gate_idle",
+				    &timing->sr_mc_gate_idle);
+	ret |= of_property_read_u32(np, "rockchip,srpd_lite_idle",
+				    &timing->srpd_lite_idle);
+	ret |= of_property_read_u32(np, "rockchip,standby_idle",
+				    &timing->standby_idle);
+	ret |= of_property_read_u32(np, "rockchip,auto_pd_dis_freq",
+				    &timing->auto_pd_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,dram_dll_dis_freq",
+				    &timing->dram_dll_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,phy_dll_dis_freq",
+				    &timing->phy_dll_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_odt_dis_freq",
+				    &timing->ddr3_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_drv",
+				    &timing->ddr3_drv);
+	ret |= of_property_read_u32(np, "rockchip,ddr3_odt",
+				    &timing->ddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_ca_drv",
+				    &timing->phy_ddr3_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_dq_drv",
+				    &timing->phy_ddr3_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_ddr3_odt",
+				    &timing->phy_ddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_odt_dis_freq",
+				    &timing->lpddr3_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_drv",
+				    &timing->lpddr3_drv);
+	ret |= of_property_read_u32(np, "rockchip,lpddr3_odt",
+				    &timing->lpddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_ca_drv",
+				    &timing->phy_lpddr3_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_dq_drv",
+				    &timing->phy_lpddr3_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_odt",
+				    &timing->phy_lpddr3_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_odt_dis_freq",
+				    &timing->lpddr4_odt_dis_freq);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_drv",
+				    &timing->lpddr4_drv);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_dq_odt",
+				    &timing->lpddr4_dq_odt);
+	ret |= of_property_read_u32(np, "rockchip,lpddr4_ca_odt",
+				    &timing->lpddr4_ca_odt);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ca_drv",
+				    &timing->phy_lpddr4_ca_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ck_cs_drv",
+				    &timing->phy_lpddr4_ck_cs_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_dq_drv",
+				    &timing->phy_lpddr4_dq_drv);
+	ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_odt",
+				    &timing->phy_lpddr4_odt);
+
+	return ret;
+}
+
+static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+{
+	struct arm_smccc_res res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+	struct rk3399_dmcfreq *data;
+	int ret, index, size;
+	uint32_t *timing;
+	struct dev_pm_opp *opp;
+
+	data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	mutex_init(&data->lock);
+
+	data->vdd_center = devm_regulator_get(dev, "center");
+	if (IS_ERR(data->vdd_center)) {
+		if (PTR_ERR(data->vdd_center) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		dev_err(dev, "Cannot get the regulator \"center\"\n");
+		return PTR_ERR(data->vdd_center);
+	}
+
+	data->dmc_clk = devm_clk_get(dev, "dmc_clk");
+	if (IS_ERR(data->dmc_clk)) {
+		if (PTR_ERR(data->dmc_clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		dev_err(dev, "Cannot get the clk dmc_clk\n");
+		return PTR_ERR(data->dmc_clk);
+	}
+
+	data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
+	if (IS_ERR(data->edev))
+		return -EPROBE_DEFER;
+
+	ret = devfreq_event_enable_edev(data->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable devfreq-event devices\n");
+		return ret;
+	}
+
+	/*
+	 * Get the DRAM timings and pass them to the ARM trusted firmware;
+	 * the DRAM driver in the trusted firmware uses these timings to
+	 * initialize the DRAM.
+	 */
+	if (!of_get_ddr_timings(&data->timing, np)) {
+		timing = &data->timing.ddr3_speed_bin;
+		size = sizeof(struct dram_timing) / 4;
+		for (index = 0; index < size; index++) {
+			arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index,
+				      ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
+				      0, 0, 0, 0, &res);
+			if (res.a0) {
+				dev_err(dev, "Failed to set dram param: %ld\n",
+					res.a0);
+				return -EINVAL;
+			}
+		}
+	}
+
+	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+		      ROCKCHIP_SIP_CONFIG_DRAM_INIT,
+		      0, 0, 0, 0, &res);
+
+	/*
+	 * Register the OPP table for this device from the operating points
+	 * listed in its device tree node.
+	 */
+	if (dev_pm_opp_of_add_table(dev)) {
+		dev_err(dev, "Invalid operating-points in device tree.\n");
+		return -EINVAL;
+	}
+
+	of_property_read_u32(np, "upthreshold",
+			     &data->ondemand_data.upthreshold);
+	of_property_read_u32(np, "downdifferential",
+			     &data->ondemand_data.downdifferential);
+
+	data->rate = clk_get_rate(data->dmc_clk);
+
+	opp = devfreq_recommended_opp(dev, &data->rate, 0);
+	if (IS_ERR(opp)) {
+		ret = PTR_ERR(opp);
+		goto err_free_opp;
+	}
+
+	data->rate = dev_pm_opp_get_freq(opp);
+	data->volt = dev_pm_opp_get_voltage(opp);
+	dev_pm_opp_put(opp);
+
+	rk3399_devfreq_dmc_profile.initial_freq = data->rate;
+
+	data->devfreq = devm_devfreq_add_device(dev,
+					   &rk3399_devfreq_dmc_profile,
+					   DEVFREQ_GOV_SIMPLE_ONDEMAND,
+					   &data->ondemand_data);
+	if (IS_ERR(data->devfreq)) {
+		ret = PTR_ERR(data->devfreq);
+		goto err_free_opp;
+	}
+
+	devm_devfreq_register_opp_notifier(dev, data->devfreq);
+
+	data->dev = dev;
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+
+err_free_opp:
+	dev_pm_opp_of_remove_table(&pdev->dev);
+	return ret;
+}
+
+static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+{
+	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+
+	/*
+	 * Unregister the OPP notifier before removing the OPP table.
+	 */
+	devm_devfreq_unregister_opp_notifier(dmcfreq->dev, dmcfreq->devfreq);
+	dev_pm_opp_of_remove_table(dmcfreq->dev);
+
+	return 0;
+}
+
+static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
+	{ .compatible = "rockchip,rk3399-dmc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
+
+static struct platform_driver rk3399_dmcfreq_driver = {
+	.probe	= rk3399_dmcfreq_probe,
+	.remove = rk3399_dmcfreq_remove,
+	.driver = {
+		.name	= "rk3399-dmc-freq",
+		.pm	= &rk3399_dmcfreq_pm,
+		.of_match_table = rk3399dmc_devfreq_of_match,
+	},
+};
+module_platform_driver(rk3399_dmcfreq_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
+MODULE_DESCRIPTION("RK3399 dmcfreq driver with devfreq framework");
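
The ordering in rk3399_dmcfreq_target() is the usual DVFS rule: raise the voltage before raising the clock, lower it only after the clock has gone down, and roll the voltage back if the clock change fails or lands on an unexpected rate. A stripped-down sketch of that pattern (my_dvfs_set is a placeholder; the real function additionally holds dmcfreq->lock and records the new rate/voltage on success):

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static int my_dvfs_set(struct clk *clk, struct regulator *reg,
		       unsigned long old_rate, unsigned long new_rate,
		       int old_uV, int new_uV)
{
	int err;

	/* Going up: the higher rate needs the higher voltage first. */
	if (old_rate < new_rate) {
		err = regulator_set_voltage(reg, new_uV, new_uV);
		if (err)
			return err;
	}

	err = clk_set_rate(clk, new_rate);
	if (err || clk_get_rate(clk) != new_rate) {
		/* Clock change failed or landed elsewhere: restore voltage. */
		regulator_set_voltage(reg, old_uV, old_uV);
		return err ? err : -EINVAL;
	}

	/* Going down: the lower voltage is only safe after the clock drop. */
	if (old_rate > new_rate)
		err = regulator_set_voltage(reg, new_uV, new_uV);

	return err;
}
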
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
new file mode 100644
index 0000000..c59d2ee
--- /dev/null
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -0,0 +1,788 @@
+/*
+ * A devfreq driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "governor.h"
+
+#define ACTMON_GLB_STATUS					0x0
+#define ACTMON_GLB_PERIOD_CTRL					0x4
+
+#define ACTMON_DEV_CTRL						0x0
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
+#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
+#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
+#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
+#define ACTMON_DEV_CTRL_ENB					BIT(31)
+
+#define ACTMON_DEV_UPPER_WMARK					0x4
+#define ACTMON_DEV_LOWER_WMARK					0x8
+#define ACTMON_DEV_INIT_AVG					0xc
+#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
+#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
+#define ACTMON_DEV_COUNT_WEIGHT					0x18
+#define ACTMON_DEV_AVG_COUNT					0x20
+#define ACTMON_DEV_INTR_STATUS					0x24
+
+#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
+
+#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
+#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
+
+#define ACTMON_ABOVE_WMARK_WINDOW				1
+#define ACTMON_BELOW_WMARK_WINDOW				3
+#define ACTMON_BOOST_FREQ_STEP					16000
+
+/*
+ * Activity counter is incremented every 256 memory transactions, and each
+ * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
+ * 4 * 256 = 1024.
+ */
+#define ACTMON_COUNT_WEIGHT					0x400
+
+/*
+ * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
+ * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
+ */
+#define ACTMON_AVERAGE_WINDOW_LOG2			6
+#define ACTMON_SAMPLING_PERIOD				12 /* ms */
+#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
+
+#define KHZ							1000
+
+/* Assume that the bus is saturated if the utilization is 25% */
+#define BUS_SATURATION_RATIO					25
+
+/**
+ * struct tegra_devfreq_device_config - configuration specific to an ACTMON
+ * device
+ *
+ * Coefficients and thresholds are percentages unless otherwise noted
+ */
+struct tegra_devfreq_device_config {
+	u32		offset;
+	u32		irq_mask;
+
+	/* Factors applied to boost_freq every consecutive watermark breach */
+	unsigned int	boost_up_coeff;
+	unsigned int	boost_down_coeff;
+
+	/* Define the watermark bounds when applied to the current avg */
+	unsigned int	boost_up_threshold;
+	unsigned int	boost_down_threshold;
+
+	/*
+	 * Threshold of activity (cycles) below which the CPU frequency isn't
+	 * to be taken into account. This is to avoid increasing the EMC
+	 * frequency when the CPU is very busy but not accessing the bus often.
+	 */
+	u32		avg_dependency_threshold;
+};
+
+enum tegra_actmon_device {
+	MCALL = 0,
+	MCCPU,
+};
+
+static struct tegra_devfreq_device_config actmon_device_configs[] = {
+	{
+		/* MCALL: All memory accesses (including from the CPUs) */
+		.offset = 0x1c0,
+		.irq_mask = 1 << 26,
+		.boost_up_coeff = 200,
+		.boost_down_coeff = 50,
+		.boost_up_threshold = 60,
+		.boost_down_threshold = 40,
+	},
+	{
+		/* MCCPU: memory accesses from the CPUs */
+		.offset = 0x200,
+		.irq_mask = 1 << 25,
+		.boost_up_coeff = 800,
+		.boost_down_coeff = 90,
+		.boost_up_threshold = 27,
+		.boost_down_threshold = 10,
+		.avg_dependency_threshold = 50000,
+	},
+};
+
+/**
+ * struct tegra_devfreq_device - state specific to an ACTMON device
+ *
+ * Frequencies are in kHz.
+ */
+struct tegra_devfreq_device {
+	const struct tegra_devfreq_device_config *config;
+	void __iomem *regs;
+	spinlock_t lock;
+
+	/* Average event count sampled in the last interrupt */
+	u32 avg_count;
+
+	/*
+	 * Extra frequency to increase the target by due to consecutive
+	 * watermark breaches.
+	 */
+	unsigned long boost_freq;
+
+	/* Optimal frequency calculated from the stats for this device */
+	unsigned long target_freq;
+};
+
+struct tegra_devfreq {
+	struct devfreq		*devfreq;
+
+	struct reset_control	*reset;
+	struct clk		*clock;
+	void __iomem		*regs;
+
+	struct clk		*emc_clock;
+	unsigned long		max_freq;
+	unsigned long		cur_freq;
+	struct notifier_block	rate_change_nb;
+
+	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
+};
+
+struct tegra_actmon_emc_ratio {
+	unsigned long cpu_freq;
+	unsigned long emc_freq;
+};
+
+static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+	{ 1400000, ULONG_MAX },
+	{ 1200000,    750000 },
+	{ 1100000,    600000 },
+	{ 1000000,    500000 },
+	{  800000,    375000 },
+	{  500000,    200000 },
+	{  250000,    100000 },
+};
+
+static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
+{
+	return readl(tegra->regs + offset);
+}
+
+static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
+{
+	writel(val, tegra->regs + offset);
+}
+
+static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
+{
+	return readl(dev->regs + offset);
+}
+
+static void device_writel(struct tegra_devfreq_device *dev, u32 val,
+			  u32 offset)
+{
+	writel(val, dev->regs + offset);
+}
+
+static unsigned long do_percent(unsigned long val, unsigned int pct)
+{
+	return val * pct / 100;
+}
+
+static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
+					   struct tegra_devfreq_device *dev)
+{
+	u32 avg = dev->avg_count;
+	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
+	u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+
+	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
+
+	avg = max(dev->avg_count, band);
+	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
+}
+
+static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
+				       struct tegra_devfreq_device *dev)
+{
+	u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+
+	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
+		      ACTMON_DEV_UPPER_WMARK);
+
+	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
+		      ACTMON_DEV_LOWER_WMARK);
+}
+
+static void actmon_write_barrier(struct tegra_devfreq *tegra)
+{
+	/* ensure the update has reached the ACTMON */
+	wmb();
+	actmon_readl(tegra, ACTMON_GLB_STATUS);
+}
+
+static void actmon_isr_device(struct tegra_devfreq *tegra,
+			      struct tegra_devfreq_device *dev)
+{
+	unsigned long flags;
+	u32 intr_status, dev_ctrl;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
+	tegra_devfreq_update_avg_wmark(tegra, dev);
+
+	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
+	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
+
+	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
+		/*
+		 * new_boost = min(old_boost * up_coef + step, max_freq)
+		 */
+		dev->boost_freq = do_percent(dev->boost_freq,
+					     dev->config->boost_up_coeff);
+		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
+
+		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+		if (dev->boost_freq >= tegra->max_freq)
+			dev->boost_freq = tegra->max_freq;
+		else
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
+		/*
+		 * new_boost = old_boost * down_coef
+		 * or 0 if (old_boost * down_coef < step / 2)
+		 */
+		dev->boost_freq = do_percent(dev->boost_freq,
+					     dev->config->boost_down_coeff);
+
+		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
+			dev->boost_freq = 0;
+		else
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+	}
+
+	if (dev->config->avg_dependency_threshold) {
+		if (dev->avg_count >= dev->config->avg_dependency_threshold)
+			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		else if (dev->boost_freq == 0)
+			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+	}
+
+	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
+
+	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
+
+	actmon_write_barrier(tegra);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static irqreturn_t actmon_isr(int irq, void *data)
+{
+	struct tegra_devfreq *tegra = data;
+	bool handled = false;
+	unsigned int i;
+	u32 val;
+
+	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		if (val & tegra->devices[i].config->irq_mask) {
+			actmon_isr_device(tegra, tegra->devices + i);
+			handled = true;
+		}
+	}
+
+	return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
+					    unsigned long cpu_freq)
+{
+	unsigned int i;
+	struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+
+	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
+		if (cpu_freq >= ratio->cpu_freq) {
+			if (ratio->emc_freq >= tegra->max_freq)
+				return tegra->max_freq;
+			else
+				return ratio->emc_freq;
+		}
+	}
+
+	return 0;
+}
+
+static void actmon_update_target(struct tegra_devfreq *tegra,
+				 struct tegra_devfreq_device *dev)
+{
+	unsigned long cpu_freq = 0;
+	unsigned long static_cpu_emc_freq = 0;
+	unsigned int avg_sustain_coef;
+	unsigned long flags;
+
+	if (dev->config->avg_dependency_threshold) {
+		cpu_freq = cpufreq_get(0);
+		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
+	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+	dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
+	dev->target_freq += dev->boost_freq;
+
+	if (dev->avg_count >= dev->config->avg_dependency_threshold)
+		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static irqreturn_t actmon_thread_isr(int irq, void *data)
+{
+	struct tegra_devfreq *tegra = data;
+
+	mutex_lock(&tegra->devfreq->lock);
+	update_devfreq(tegra->devfreq);
+	mutex_unlock(&tegra->devfreq->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
+				       unsigned long action, void *ptr)
+{
+	struct clk_notifier_data *data = ptr;
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	unsigned int i;
+	unsigned long flags;
+
+	if (action != POST_RATE_CHANGE)
+		return NOTIFY_OK;
+
+	tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+
+	tegra->cur_freq = data->new_rate / KHZ;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		spin_lock_irqsave(&dev->lock, flags);
+		tegra_devfreq_update_wmark(tegra, dev);
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	actmon_write_barrier(tegra);
+
+	return NOTIFY_OK;
+}
+
+static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
+{
+	struct tegra_devfreq_device *dev;
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		val = device_readl(dev, ACTMON_DEV_CTRL);
+		val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		device_writel(dev, val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+}
+
+static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
+{
+	struct tegra_devfreq_device *dev;
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		val = device_readl(dev, ACTMON_DEV_CTRL);
+		val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+		val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+		device_writel(dev, val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+}
+
+static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
+					  struct tegra_devfreq_device *dev)
+{
+	u32 val = 0;
+
+	dev->target_freq = tegra->cur_freq;
+
+	dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
+
+	tegra_devfreq_update_avg_wmark(tegra, dev);
+	tegra_devfreq_update_wmark(tegra, dev);
+
+	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
+	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
+
+	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
+	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
+		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
+	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
+		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
+	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
+		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
+	val |= ACTMON_DEV_CTRL_ENB;
+
+	device_writel(dev, val, ACTMON_DEV_CTRL);
+
+	actmon_write_barrier(tegra);
+}
+
+static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
+				u32 flags)
+{
+	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long rate = *freq * KHZ;
+
+	opp = devfreq_recommended_opp(dev, &rate, flags);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+		return PTR_ERR(opp);
+	}
+	rate = dev_pm_opp_get_freq(opp);
+	dev_pm_opp_put(opp);
+
+	clk_set_min_rate(tegra->emc_clock, rate);
+	clk_set_rate(tegra->emc_clock, 0);
+
+	*freq = rate;
+
+	return 0;
+}
+
+static int tegra_devfreq_get_dev_status(struct device *dev,
+					struct devfreq_dev_status *stat)
+{
+	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+	struct tegra_devfreq_device *actmon_dev;
+
+	stat->current_frequency = tegra->cur_freq;
+
+	/* To be used by the tegra governor */
+	stat->private_data = tegra;
+
+	/* The below are to be used by the other governors */
+
+	actmon_dev = &tegra->devices[MCALL];
+
+	/* Number of cycles spent on memory access */
+	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
+
+	/* The bus can be considered to be saturated way before 100% */
+	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+
+	/* Number of cycles in a sampling period */
+	stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+
+	stat->busy_time = min(stat->busy_time, stat->total_time);
+
+	return 0;
+}
+
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+	.polling_ms	= 0,
+	.target		= tegra_devfreq_target,
+	.get_dev_status	= tegra_devfreq_get_dev_status,
+};
+
+static int tegra_governor_get_target(struct devfreq *devfreq,
+				     unsigned long *freq)
+{
+	struct devfreq_dev_status *stat;
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	unsigned long target_freq = 0;
+	unsigned int i;
+	int err;
+
+	err = devfreq_update_stats(devfreq);
+	if (err)
+		return err;
+
+	stat = &devfreq->last_status;
+
+	tegra = stat->private_data;
+
+	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+		dev = &tegra->devices[i];
+
+		actmon_update_target(tegra, dev);
+
+		target_freq = max(target_freq, dev->target_freq);
+	}
+
+	*freq = target_freq;
+
+	return 0;
+}
+
+static int tegra_governor_event_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	struct tegra_devfreq *tegra;
+	int ret = 0;
+
+	tegra = dev_get_drvdata(devfreq->dev.parent);
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		devfreq_monitor_start(devfreq);
+		tegra_actmon_enable_interrupts(tegra);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		tegra_actmon_disable_interrupts(tegra);
+		devfreq_monitor_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		tegra_actmon_disable_interrupts(tegra);
+		devfreq_monitor_suspend(devfreq);
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		devfreq_monitor_resume(devfreq);
+		tegra_actmon_enable_interrupts(tegra);
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor tegra_devfreq_governor = {
+	.name = "tegra_actmon",
+	.get_target_freq = tegra_governor_get_target,
+	.event_handler = tegra_governor_event_handler,
+};
+
+static int tegra_devfreq_probe(struct platform_device *pdev)
+{
+	struct tegra_devfreq *tegra;
+	struct tegra_devfreq_device *dev;
+	struct resource *res;
+	unsigned int i;
+	unsigned long rate;
+	int irq;
+	int err;
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->regs))
+		return PTR_ERR(tegra->regs);
+
+	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
+	if (IS_ERR(tegra->reset)) {
+		dev_err(&pdev->dev, "Failed to get reset\n");
+		return PTR_ERR(tegra->reset);
+	}
+
+	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
+	if (IS_ERR(tegra->clock)) {
+		dev_err(&pdev->dev, "Failed to get actmon clock\n");
+		return PTR_ERR(tegra->clock);
+	}
+
+	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
+	if (IS_ERR(tegra->emc_clock)) {
+		dev_err(&pdev->dev, "Failed to get emc clock\n");
+		return PTR_ERR(tegra->emc_clock);
+	}
+
+	clk_set_rate(tegra->emc_clock, ULONG_MAX);
+
+	tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
+	err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to register rate change notifier\n");
+		return err;
+	}
+
+	reset_control_assert(tegra->reset);
+
+	err = clk_prepare_enable(tegra->clock);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to prepare and enable ACTMON clock\n");
+		return err;
+	}
+
+	reset_control_deassert(tegra->reset);
+
+	tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
+	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
+	actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+		      ACTMON_GLB_PERIOD_CTRL);
+
+	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+		dev = tegra->devices + i;
+		dev->config = actmon_device_configs + i;
+		dev->regs = tegra->regs + dev->config->offset;
+		spin_lock_init(&dev->lock);
+
+		tegra_actmon_configure_device(tegra, dev);
+	}
+
+	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
+		rate = clk_round_rate(tegra->emc_clock, rate);
+		dev_pm_opp_add(&pdev->dev, rate, 0);
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		return irq;
+	}
+
+	platform_set_drvdata(pdev, tegra);
+
+	err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
+					actmon_thread_isr, IRQF_SHARED,
+					"tegra-devfreq", tegra);
+	if (err) {
+		dev_err(&pdev->dev, "Interrupt request failed\n");
+		return err;
+	}
+
+	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+	tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+						 &tegra_devfreq_profile,
+						 "tegra_actmon",
+						 NULL);
+
+	return 0;
+}
+
+static int tegra_devfreq_remove(struct platform_device *pdev)
+{
+	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+	u32 val;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+		val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
+		val &= ~ACTMON_DEV_CTRL_ENB;
+		device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
+	}
+
+	actmon_write_barrier(tegra);
+
+	devm_free_irq(&pdev->dev, irq, tegra);
+
+	clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+	clk_disable_unprepare(tegra->clock);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_devfreq_of_match[] = {
+	{ .compatible = "nvidia,tegra124-actmon" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
+
+static struct platform_driver tegra_devfreq_driver = {
+	.probe	= tegra_devfreq_probe,
+	.remove	= tegra_devfreq_remove,
+	.driver = {
+		.name = "tegra-devfreq",
+		.of_match_table = tegra_devfreq_of_match,
+	},
+};
+
+static int __init tegra_devfreq_init(void)
+{
+	int ret = 0;
+
+	ret = devfreq_add_governor(&tegra_devfreq_governor);
+	if (ret) {
+		pr_err("%s: failed to add governor: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&tegra_devfreq_driver);
+	if (ret)
+		devfreq_remove_governor(&tegra_devfreq_governor);
+
+	return ret;
+}
+module_init(tegra_devfreq_init)
+
+static void __exit tegra_devfreq_exit(void)
+{
+	int ret = 0;
+
+	platform_driver_unregister(&tegra_devfreq_driver);
+
+	ret = devfreq_remove_governor(&tegra_devfreq_governor);
+	if (ret)
+		pr_err("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(tegra_devfreq_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tegra devfreq driver");
+MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
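
actmon_update_target() above converts the averaged activity count into a frequency: divide by the sampling window to get a rate in kHz, scale by 100 * 100 / boost_up_threshold so the measured activity would sit near the up-threshold at the new rate, then add the interrupt-driven boost. For MCALL (threshold 60), an average of 1,200,000 counts over the 12 ms window gives 100,000 kHz * 166 / 100, roughly 166 MHz, before boost. A condensed restatement (illustration only; the MCCPU device additionally applies the CPU-derived EMC floor):

#include <linux/types.h>

#define SAMPLING_PERIOD_MS	12	/* mirrors ACTMON_SAMPLING_PERIOD */

static unsigned long actmon_target_khz(u32 avg_count, unsigned long boost_khz,
				       unsigned int boost_up_threshold)
{
	unsigned long freq = avg_count / SAMPLING_PERIOD_MS;	/* kHz */
	unsigned int avg_sustain_coef = 100 * 100 / boost_up_threshold;

	freq = freq * avg_sustain_coef / 100;	/* same as do_percent() */

	return freq + boost_khz;
}
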