v4.19.13 snapshot.
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
new file mode 100644
index 0000000..99e36c5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -0,0 +1,5 @@
+obj-y	= scmi-bus.o scmi-driver.o scmi-protocols.o
+scmi-bus-y = bus.o
+scmi-driver-y = driver.o
+scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
new file mode 100644
index 0000000..9dff33e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/base.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Base Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_base_protocol_cmd {
+	BASE_DISCOVER_VENDOR = 0x3,
+	BASE_DISCOVER_SUB_VENDOR = 0x4,
+	BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
+	BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
+	BASE_DISCOVER_AGENT = 0x7,
+	BASE_NOTIFY_ERRORS = 0x8,
+};
+
+struct scmi_msg_resp_base_attributes {
+	u8 num_protocols;
+	u8 num_agents;
+	__le16 reserved;
+};
+
+/**
+ * scmi_base_attributes_get() - gets the implementation details
+ *	that are associated with the base protocol.
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_attributes_get(const struct scmi_handle *handle)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_base_attributes *attr_info;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		attr_info = t->rx.buf;
+		rev->num_protocols = attr_info->num_protocols;
+		rev->num_agents = attr_info->num_agents;
+	}
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
+ *
+ * @handle: SCMI entity handle
+ * @sub_vendor: specify true if sub-vendor ID is needed
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
+{
+	u8 cmd;
+	int ret, size;
+	char *vendor_id;
+	struct scmi_xfer *t;
+	struct scmi_revision_info *rev = handle->version;
+
+	if (sub_vendor) {
+		cmd = BASE_DISCOVER_SUB_VENDOR;
+		vendor_id = rev->sub_vendor_id;
+		size = ARRAY_SIZE(rev->sub_vendor_id);
+	} else {
+		cmd = BASE_DISCOVER_VENDOR;
+		vendor_id = rev->vendor_id;
+		size = ARRAY_SIZE(rev->vendor_id);
+	}
+
+	ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(vendor_id, t->rx.buf, size);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_implementation_version_get() - gets the vendor-specific
+ *	32-bit implementation version; the format of the version number
+ *	is defined by the vendor
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_version_get(const struct scmi_handle *handle)
+{
+	int ret;
+	__le32 *impl_ver;
+	struct scmi_xfer *t;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
+				 SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		impl_ver = t->rx.buf;
+		rev->impl_ver = le32_to_cpu(*impl_ver);
+	}
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_implementation_list_get() - gets the list of protocols that
+ *	OSPM is allowed to access
+ *
+ * @handle: SCMI entity handle
+ * @protocols_imp: pointer to hold the list of protocol identifiers
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
+					     u8 *protocols_imp)
+{
+	u8 *list;
+	int ret, loop;
+	struct scmi_xfer *t;
+	__le32 *num_skip, *num_ret;
+	u32 tot_num_ret = 0, loop_num_ret;
+	struct device *dev = handle->dev;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
+				 SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
+	if (ret)
+		return ret;
+
+	num_skip = t->tx.buf;
+	num_ret = t->rx.buf;
+	list = t->rx.buf + sizeof(*num_ret);
+
+	do {
+		/* Set the number of protocols to be skipped/already read */
+		*num_skip = cpu_to_le32(tot_num_ret);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		loop_num_ret = le32_to_cpu(*num_ret);
+		if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+			dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
+			break;
+		}
+
+		for (loop = 0; loop < loop_num_ret; loop++)
+			protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+		tot_num_ret += loop_num_ret;
+	} while (loop_num_ret);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+/**
+ * scmi_base_discover_agent_get() - discover the name of an agent
+ *
+ * @handle: SCMI entity handle
+ * @id: Agent identifier
+ * @name: Agent identifier ASCII string
+ *
+ * An agent id of 0 is reserved to identify the platform itself.
+ * Generally, the operating system is represented as "OSPM".
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
+					int id, char *name)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
+				 SCMI_PROTOCOL_BASE, sizeof(__le32),
+				 SCMI_MAX_STR_SIZE, &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+
+	scmi_xfer_put(handle, t);
+
+	return ret;
+}
+
+int scmi_base_protocol_init(struct scmi_handle *h)
+{
+	int id, ret;
+	u8 *prot_imp;
+	u32 version;
+	char name[SCMI_MAX_STR_SIZE];
+	const struct scmi_handle *handle = h;
+	struct device *dev = handle->dev;
+	struct scmi_revision_info *rev = handle->version;
+
+	ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
+	if (ret)
+		return ret;
+
+	prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
+	if (!prot_imp)
+		return -ENOMEM;
+
+	rev->major_ver = PROTOCOL_REV_MAJOR(version);
+	rev->minor_ver = PROTOCOL_REV_MINOR(version);
+
+	scmi_base_attributes_get(handle);
+	scmi_base_vendor_id_get(handle, false);
+	scmi_base_vendor_id_get(handle, true);
+	scmi_base_implementation_version_get(handle);
+	scmi_base_implementation_list_get(handle, prot_imp);
+	scmi_setup_protocol_implemented(handle, prot_imp);
+
+	dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
+		 rev->major_ver, rev->minor_ver, rev->vendor_id,
+		 rev->sub_vendor_id, rev->impl_ver);
+	dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
+		rev->num_agents);
+
+	for (id = 0; id < rev->num_agents; id++) {
+		scmi_base_discover_agent_get(handle, id, name);
+		dev_dbg(dev, "Agent %d: %s\n", id, name);
+	}
+
+	return 0;
+}
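
The BASE_DISCOVER_LIST_PROTOCOLS loop above is easier to follow with a concrete
model. The sketch below is a minimal user-space rendition of the pagination
contract: the caller sends the number of identifiers already read and the
platform answers with how many 8-bit identifiers follow in this chunk.
mock_fw_list_protocols() and its three-identifiers-per-reply limit are invented
stand-ins for firmware; only the skip/returned looping rule mirrors the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PROTOCOLS_IMP 16

/* Hypothetical protocol ids implemented by the mocked platform */
static const uint8_t fw_protocols[] = { 0x10, 0x11, 0x13, 0x14, 0x15 };

/* Mock firmware: return at most 3 identifiers per call, starting at 'skip' */
static uint32_t mock_fw_list_protocols(uint32_t skip, uint8_t *list)
{
	uint32_t n;

	if (skip >= sizeof(fw_protocols))
		return 0;
	n = sizeof(fw_protocols) - skip;
	if (n > 3)
		n = 3;
	memcpy(list, fw_protocols + skip, n);
	return n;
}

int main(void)
{
	uint8_t prot_imp[MAX_PROTOCOLS_IMP] = { 0 };
	uint32_t i, ret, tot = 0;

	/* Same loop shape as the driver: resend with an updated skip count */
	do {
		ret = mock_fw_list_protocols(tot, prot_imp + tot);
		tot += ret;
	} while (ret && tot < MAX_PROTOCOLS_IMP);

	for (i = 0; i < tot; i++)
		printf("protocol 0x%02x implemented\n", prot_imp[i]);

	return 0;
}
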
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
new file mode 100644
index 0000000..472c88a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol bus layer
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "common.h"
+
+static DEFINE_IDA(scmi_bus_id);
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
+
+static const struct scmi_device_id *
+scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+{
+	const struct scmi_device_id *id = scmi_drv->id_table;
+
+	if (!id)
+		return NULL;
+
+	for (; id->protocol_id; id++)
+		if (id->protocol_id == scmi_dev->protocol_id)
+			return id;
+
+	return NULL;
+}
+
+static int scmi_dev_match(struct device *dev, struct device_driver *drv)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+	const struct scmi_device_id *id;
+
+	id = scmi_dev_match_id(scmi_dev, scmi_drv);
+	if (id)
+		return 1;
+
+	return 0;
+}
+
+static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
+{
+	scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
+
+	if (unlikely(!fn))
+		return -EINVAL;
+	return fn(handle);
+}
+
+static int scmi_dev_probe(struct device *dev)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+	const struct scmi_device_id *id;
+	int ret;
+
+	id = scmi_dev_match_id(scmi_dev, scmi_drv);
+	if (!id)
+		return -ENODEV;
+
+	if (!scmi_dev->handle)
+		return -EPROBE_DEFER;
+
+	ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
+	if (ret)
+		return ret;
+
+	return scmi_drv->probe(scmi_dev);
+}
+
+static int scmi_dev_remove(struct device *dev)
+{
+	struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+	if (scmi_drv->remove)
+		scmi_drv->remove(scmi_dev);
+
+	return 0;
+}
+
+static struct bus_type scmi_bus_type = {
+	.name =	"scmi_protocol",
+	.match = scmi_dev_match,
+	.probe = scmi_dev_probe,
+	.remove = scmi_dev_remove,
+};
+
+int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+			 const char *mod_name)
+{
+	int retval;
+
+	driver->driver.bus = &scmi_bus_type;
+	driver->driver.name = driver->name;
+	driver->driver.owner = owner;
+	driver->driver.mod_name = mod_name;
+
+	retval = driver_register(&driver->driver);
+	if (!retval)
+		pr_debug("registered new scmi driver %s\n", driver->name);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(scmi_driver_register);
+
+void scmi_driver_unregister(struct scmi_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+
+struct scmi_device *
+scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+{
+	int id, retval;
+	struct scmi_device *scmi_dev;
+
+	scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
+	if (!scmi_dev)
+		return NULL;
+
+	id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		goto free_mem;
+
+	scmi_dev->id = id;
+	scmi_dev->protocol_id = protocol;
+	scmi_dev->dev.parent = parent;
+	scmi_dev->dev.of_node = np;
+	scmi_dev->dev.bus = &scmi_bus_type;
+	dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
+
+	retval = device_register(&scmi_dev->dev);
+	if (retval)
+		goto put_dev;
+
+	return scmi_dev;
+put_dev:
+	put_device(&scmi_dev->dev);
+	ida_simple_remove(&scmi_bus_id, id);
+free_mem:
+	kfree(scmi_dev);
+	return NULL;
+}
+
+void scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+	scmi_handle_put(scmi_dev->handle);
+	device_unregister(&scmi_dev->dev);
+	ida_simple_remove(&scmi_bus_id, scmi_dev->id);
+	kfree(scmi_dev);
+}
+
+void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+	scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+}
+
+int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
+{
+	int ret;
+
+	spin_lock(&protocol_lock);
+	ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
+			GFP_ATOMIC);
+	spin_unlock(&protocol_lock);
+	if (ret != protocol_id) {
+		pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(int protocol_id)
+{
+	spin_lock(&protocol_lock);
+	idr_remove(&scmi_protocols, protocol_id);
+	spin_unlock(&protocol_lock);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+static int __scmi_devices_unregister(struct device *dev, void *data)
+{
+	struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+	scmi_device_destroy(scmi_dev);
+	return 0;
+}
+
+static void scmi_devices_unregister(void)
+{
+	bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+}
+
+static int __init scmi_bus_init(void)
+{
+	int retval;
+
+	retval = bus_register(&scmi_bus_type);
+	if (retval)
+		pr_err("scmi protocol bus register failed (%d)\n", retval);
+
+	return retval;
+}
+subsys_initcall(scmi_bus_init);
+
+static void __exit scmi_bus_exit(void)
+{
+	scmi_devices_unregister();
+	bus_unregister(&scmi_bus_type);
+	ida_destroy(&scmi_bus_id);
+}
+module_exit(scmi_bus_exit);
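
For illustration, a minimal client of this bus could look like the sketch
below. The driver name and probe body are hypothetical; the scmi_device_id
matching, the deferred probe until the handle is set, and the
register/unregister entry points are exactly what bus.c above implements
(module_scmi_driver() is the convenience macro declared alongside these
exports in <linux/scmi_protocol.h>).

#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int dummy_scmi_probe(struct scmi_device *sdev)
{
	/* sdev->handle is valid here: scmi_dev_probe() defers until it is set */
	dev_info(&sdev->dev, "bound to SCMI protocol 0x%x\n",
		 sdev->protocol_id);
	return 0;
}

static const struct scmi_device_id dummy_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK },
	{ },
};

static struct scmi_driver dummy_scmi_driver = {
	.name = "dummy-scmi",
	.probe = dummy_scmi_probe,
	.id_table = dummy_id_table,
};
module_scmi_driver(dummy_scmi_driver);

MODULE_LICENSE("GPL v2");
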
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 0000000..e4119eb
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_clock_protocol_cmd {
+	CLOCK_ATTRIBUTES = 0x3,
+	CLOCK_DESCRIBE_RATES = 0x4,
+	CLOCK_RATE_SET = 0x5,
+	CLOCK_RATE_GET = 0x6,
+	CLOCK_CONFIG_SET = 0x7,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+	__le16 num_clocks;
+	u8 max_async_req;
+	u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+	__le32 attributes;
+#define	CLOCK_ENABLE	BIT(0)
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_set_config {
+	__le32 id;
+	__le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+	__le32 id;
+	__le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+	__le32 num_rates_flags;
+#define NUM_RETURNED(x)		((x) & 0xfff)
+#define RATE_DISCRETE(x)	(!((x) & BIT(12)))
+#define NUM_REMAINING(x)	((x) >> 16)
+	struct {
+		__le32 value_low;
+		__le32 value_high;
+	} rate[0];
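+/* Assemble one 64-bit rate in Hz from its two little-endian 32-bit halves */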
+#define RATE_TO_U64(X)		\
+({				\
+	typeof(X) x = (X);	\
+	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
+
+struct scmi_clock_set_rate {
+	__le32 flags;
+#define CLOCK_SET_ASYNC		BIT(0)
+#define CLOCK_SET_DELAYED	BIT(1)
+#define CLOCK_SET_ROUND_UP	BIT(2)
+#define CLOCK_SET_ROUND_AUTO	BIT(3)
+	__le32 id;
+	__le32 value_low;
+	__le32 value_high;
+};
+
+struct clock_info {
+	int num_clocks;
+	int max_async_req;
+	struct scmi_clock_info *clk;
+};
+
+static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
+					      struct clock_info *ci)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		ci->num_clocks = le16_to_cpu(attr->num_clocks);
+		ci->max_async_req = attr->max_async_req;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+				     u32 clk_id, struct scmi_clock_info *clk)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_clock_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
+				 sizeof(clk_id), sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+	else
+		clk->name[0] = '\0';
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+			      struct scmi_clock_info *clk)
+{
+	u64 *rate;
+	int ret, cnt;
+	bool rate_discrete = false;
+	u32 tot_rate_cnt = 0, rates_flag;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_msg_clock_describe_rates *clk_desc;
+	struct scmi_msg_resp_clock_describe_rates *rlist;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
+				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+	if (ret)
+		return ret;
+
+	clk_desc = t->tx.buf;
+	rlist = t->rx.buf;
+
+	do {
+		clk_desc->id = cpu_to_le32(clk_id);
+		/* Set the number of rates to be skipped/already read */
+		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			goto err;
+
+		rates_flag = le32_to_cpu(rlist->num_rates_flags);
+		num_remaining = NUM_REMAINING(rates_flag);
+		rate_discrete = RATE_DISCRETE(rates_flag);
+		num_returned = NUM_RETURNED(rates_flag);
+
+		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
+			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
+			break;
+		}
+
+		if (!rate_discrete) {
+			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
+			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
+			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
+			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+				clk->range.min_rate, clk->range.max_rate,
+				clk->range.step_size);
+			break;
+		}
+
+		rate = &clk->list.rates[tot_rate_cnt];
+		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
+			*rate = RATE_TO_U64(rlist->rate[cnt]);
+			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+		}
+
+		tot_rate_cnt += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	if (rate_discrete)
+		clk->list.num_rates = tot_rate_cnt;
+
+err:
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(__le32), sizeof(u64), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		__le32 *pval = t->rx.buf;
+
+		*value = le32_to_cpu(*pval);
+		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
+			       u32 config, u64 rate)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_clock_set_rate *cfg;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->flags = cpu_to_le32(config);
+	cfg->id = cpu_to_le32(clk_id);
+	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+	cfg->value_high = cpu_to_le32(rate >> 32);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_clock_set_config *cfg;
+
+	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
+				 sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->id = cpu_to_le32(clk_id);
+	cfg->attributes = cpu_to_le32(config);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+{
+	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+}
+
+static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+{
+	return scmi_clock_config_set(handle, clk_id, 0);
+}
+
+static int scmi_clock_count_get(const struct scmi_handle *handle)
+{
+	struct clock_info *ci = handle->clk_priv;
+
+	return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+{
+	struct clock_info *ci = handle->clk_priv;
+	struct scmi_clock_info *clk = ci->clk + clk_id;
+
+	if (!clk->name[0])
+		return NULL;
+
+	return clk;
+}
+
+static struct scmi_clk_ops clk_ops = {
+	.count_get = scmi_clock_count_get,
+	.info_get = scmi_clock_info_get,
+	.rate_get = scmi_clock_rate_get,
+	.rate_set = scmi_clock_rate_set,
+	.enable = scmi_clock_enable,
+	.disable = scmi_clock_disable,
+};
+
+static int scmi_clock_protocol_init(struct scmi_handle *handle)
+{
+	u32 version;
+	int clkid, ret;
+	struct clock_info *cinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+
+	dev_dbg(handle->dev, "Clock Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	scmi_clock_protocol_attributes_get(handle, cinfo);
+
+	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+				  sizeof(*cinfo->clk), GFP_KERNEL);
+	if (!cinfo->clk)
+		return -ENOMEM;
+
+	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+		struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+		ret = scmi_clock_attributes_get(handle, clkid, clk);
+		if (!ret)
+			scmi_clock_describe_rates_get(handle, clkid, clk);
+	}
+
+	handle->clk_ops = &clk_ops;
+	handle->clk_priv = cinfo;
+
+	return 0;
+}
+
+static int __init scmi_clock_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
+				      &scmi_clock_protocol_init);
+}
+subsys_initcall(scmi_clock_init);
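
A hedged sketch of how a consumer (for instance, a clock provider driver)
would drive the clk_ops table exported above, once it holds a handle obtained
via scmi_handle_get()/scmi_set_handle(). The walking loop is purely
illustrative and not taken from any in-tree user.

#include <linux/printk.h>
#include <linux/scmi_protocol.h>

static void example_walk_clocks(const struct scmi_handle *handle)
{
	int i, count = handle->clk_ops->count_get(handle);

	for (i = 0; i < count; i++) {
		u64 rate;
		const struct scmi_clock_info *info =
			handle->clk_ops->info_get(handle, i);

		if (!info)	/* attributes query failed for this id */
			continue;

		if (!handle->clk_ops->rate_get(handle, i, &rate))
			pr_info("clock '%s' running at %llu Hz\n",
				info->name, rate);
	}
}
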
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
new file mode 100644
index 0000000..937a930
--- /dev/null
+++ b/drivers/firmware/arm_scmi/common.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * driver common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+
+#define PROTOCOL_REV_MINOR_MASK	GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK	GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
+#define PROTOCOL_REV_MINOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
+#define MAX_PROTOCOLS_IMP	16
+#define MAX_OPPS		16
+
+enum scmi_common_cmd {
+	PROTOCOL_VERSION = 0x0,
+	PROTOCOL_ATTRIBUTES = 0x1,
+	PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @major_version: Major version of the ABI that firmware supports
+ * @minor_version: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+	__le16 minor_version;
+	__le16 major_version;
+};
+
+/**
+ * struct scmi_msg_hdr - Message (Tx/Rx) header
+ *
+ * @id: The identifier of the command being sent
+ * @protocol_id: The identifier of the protocol used to send @id command
+ * @seq: The token to identify the message. When a message/command returns,
+ *	the platform returns the whole message header unmodified, including
+ *	the token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ *	completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+	u8 id;
+	u8 protocol_id;
+	u16 seq;
+	u32 status;
+	bool poll_completion;
+};
+
+/**
+ * struct scmi_msg - Message (Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the buffer
+ */
+struct scmi_msg {
+	void *buf;
+	size_t len;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ *	message. If request-ACK protocol is used, we can reuse the same
+ *	buffer for the rx path as we use for the tx path.
+ * @done: completion event
+ */
+struct scmi_xfer {
+	struct scmi_msg_hdr hdr;
+	struct scmi_msg tx;
+	struct scmi_msg rx;
+	struct completion done;
+};
+
+void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
+		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
+int scmi_handle_put(const struct scmi_handle *handle);
+struct scmi_handle *scmi_handle_get(struct device *dev);
+void scmi_set_handle(struct scmi_device *scmi_dev);
+int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+				     u8 *prot_imp);
+
+int scmi_base_protocol_init(struct scmi_handle *h);
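
Every protocol file in this series builds its commands from the three helpers
declared above, always in the same shape. A condensed sketch follows;
EXAMPLE_CMD and the single-u32 payload are placeholders, not a real SCMI
command.

#include "common.h"

#define EXAMPLE_CMD	0x3	/* hypothetical message id */

static int example_cmd_send(const struct scmi_handle *handle, u32 arg,
			    u32 *result)
{
	struct scmi_xfer *t;
	int ret;

	/* 1. grab a preallocated xfer, set up the header and buffer sizes */
	ret = scmi_xfer_get_init(handle, EXAMPLE_CMD, SCMI_PROTOCOL_BASE,
				 sizeof(__le32), sizeof(__le32), &t);
	if (ret)
		return ret;

	/* 2. marshal the little-endian payload, transfer, unmarshal reply */
	*(__le32 *)t->tx.buf = cpu_to_le32(arg);
	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*result = le32_to_cpu(*(__le32 *)t->rx.buf);

	/* 3. always release the xfer back to the pool */
	scmi_xfer_put(handle, t);
	return ret;
}
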
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
new file mode 100644
index 0000000..8f952f2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol driver
+ *
+ * SCMI Message Protocol is used between the System Control Processor (SCP)
+ * and the Application Processors (AP). The Message Handling Unit (MHU)
+ * provides a mechanism for inter-processor communication between the SCP's
+ * Cortex-M3 and the AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/processor.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
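+/*
+ * The 32-bit message header layout: message id in bits [7:0], message
+ * type in bits [9:8], protocol id in bits [17:10] and the sequence
+ * token in bits [27:18]; the masks below extract and prepare them.
+ */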
+#define MSG_ID_MASK		GENMASK(7, 0)
+#define MSG_TYPE_MASK		GENMASK(9, 8)
+#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
+#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
+enum scmi_error_codes {
+	SCMI_SUCCESS = 0,	/* Success */
+	SCMI_ERR_SUPPORT = -1,	/* Not supported */
+	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
+	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
+	SCMI_ERR_ENTRY = -4,	/* Not found */
+	SCMI_ERR_RANGE = -5,	/* Value out of range */
+	SCMI_ERR_BUSY = -6,	/* Device busy */
+	SCMI_ERR_COMMS = -7,	/* Communication Error */
+	SCMI_ERR_GENERIC = -8,	/* Generic Error */
+	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
+	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+	SCMI_ERR_MAX
+};
+
+/* List of all SCMI devices active in system */
+static LIST_HEAD(scmi_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(scmi_list_mutex);
+
+/**
+ * struct scmi_xfers_info - Structure to manage transfer information
+ *
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ *	Index of this bitmap table is also used for message
+ *	sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct scmi_xfers_info {
+	struct scmi_xfer *xfer_block;
+	unsigned long *xfer_alloc_table;
+	spinlock_t xfer_lock;
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @max_rx_timeout_ms: Timeout for communication with the SoC (in milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ *	simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+	int max_rx_timeout_ms;
+	int max_msg;
+	int max_msg_size;
+};
+
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel's information
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @payload: Transmit/Receive mailbox channel payload area
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ *	 channel
+ * @handle: Pointer to SCMI entity handle
+ */
+struct scmi_chan_info {
+	struct mbox_client cl;
+	struct mbox_chan *chan;
+	void __iomem *payload;
+	struct device *dev;
+	struct scmi_handle *handle;
+};
+
+/**
+ * struct scmi_info - Structure representing a SCMI instance
+ *
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @handle: Instance of SCMI handle to send to clients
+ * @version: SCMI revision information containing protocol version,
+ *	implementation version and (sub-)vendor identification.
+ * @minfo: Message info
+ * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @protocols_imp: List of protocols implemented, currently maximum of
+ *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @node: List head
+ * @users: Number of users of this instance
+ */
+struct scmi_info {
+	struct device *dev;
+	const struct scmi_desc *desc;
+	struct scmi_revision_info version;
+	struct scmi_handle handle;
+	struct scmi_xfers_info minfo;
+	struct idr tx_idr;
+	u8 *protocols_imp;
+	struct list_head node;
+	int users;
+};
+
+#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
+#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+	__le32 reserved;
+	__le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
+	__le32 reserved1[2];
+	__le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
+	__le32 length;
+	__le32 msg_header;
+	u8 msg_payload[0];
+};
+
+static const int scmi_linux_errmap[] = {
+	/* better than a switch-case as long as the error values are contiguous */
+	0,			/* SCMI_SUCCESS */
+	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
+	-EINVAL,		/* SCMI_ERR_PARAMS */
+	-EACCES,		/* SCMI_ERR_ACCESS */
+	-ENOENT,		/* SCMI_ERR_ENTRY */
+	-ERANGE,		/* SCMI_ERR_RANGE */
+	-EBUSY,			/* SCMI_ERR_BUSY */
+	-ECOMM,			/* SCMI_ERR_COMMS */
+	-EIO,			/* SCMI_ERR_GENERIC */
+	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
+	-EPROTO,		/* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+		return scmi_linux_errmap[-errno];
+	return -EIO;
+}
+
+/**
+ * scmi_dump_header_dbg() - Helper to dump a message header.
+ *
+ * @dev: Device pointer corresponding to the SCMI entity
+ * @hdr: pointer to header.
+ */
+static inline void scmi_dump_header_dbg(struct device *dev,
+					struct scmi_msg_hdr *hdr)
+{
+	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+		hdr->id, hdr->seq, hdr->protocol_id);
+}
+
+static void scmi_fetch_response(struct scmi_xfer *xfer,
+				struct scmi_shared_mem __iomem *mem)
+{
+	xfer->hdr.status = ioread32(mem->msg_payload);
+	/* Skip the length of header and status in payload area i.e. 8 bytes */
+	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
+
+	/* Take a copy to the rx buffer.. */
+	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
+}
+
+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
+
+	/* Are we even expecting this? */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	scmi_dump_header_dbg(dev, &xfer->hdr);
+	/* Is the message of valid length? */
+	if (xfer->rx.len > info->desc->max_msg_size) {
+		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
+			xfer->rx.len, info->desc->max_msg_size);
+		return;
+	}
+
+	scmi_fetch_response(xfer, mem);
+	complete(&xfer->done);
+}
+
+/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ *	protocol id and sequence id.
+ *
+ * Return: 32-bit packed command header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * This function prepares the shared memory which contains the header and the
+ * payload.
+ */
+static void scmi_tx_prepare(struct mbox_client *cl, void *m)
+{
+	struct scmi_xfer *t = m;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	/* Mark channel busy + clear error */
+	iowrite32(0x0, &mem->channel_status);
+	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+		  &mem->flags);
+	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
+	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
+	if (t->tx.buf)
+		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Helper function used by the various command functions that are
+ * exposed to clients of this driver to allocate a message from the
+ * preallocated transfer pool.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCMI entity. Further, this also holds a spinlock to maintain
+ * integrity of internal data structures.
+ *
+ * Return: pointer to the allocated xfer on success, else ERR_PTR
+ *	encoding the corresponding error.
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+{
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	unsigned long flags, bit_pos;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+
+	/* Keep the locked section as small as possible */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+				      info->desc->max_msg);
+	if (bit_pos == info->desc->max_msg) {
+		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+		return ERR_PTR(-ENOMEM);
+	}
+	set_bit(bit_pos, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	xfer_id = bit_pos;
+
+	xfer = &minfo->xfer_block[xfer_id];
+	xfer->hdr.seq = xfer_id;
+	reinit_completion(&xfer->done);
+
+	return xfer;
+}
+
+/**
+ * scmi_xfer_put() - Release a message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	unsigned long flags;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->minfo;
+
+	/*
+	 * Keep the locked section as small as possible
+	 * NOTE: we might escape with smp_mb and no lock here..
+	 * but just be conservative and symmetric.
+	 */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+static bool
+scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
+
+	if (xfer->hdr.seq != xfer_id)
+		return false;
+
+	return ioread32(&mem->channel_status) &
+		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
+
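+/* Polled (hdr.poll_completion) transfers busy-wait for at most 100us */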
+#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
+
+static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
+				      struct scmi_xfer *xfer, ktime_t stop)
+{
+	ktime_t __cur = ktime_get();
+
+	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
+}
+
+/**
+ * scmi_do_xfer() - Do one transfer
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ *	return corresponding error, else if all goes well,
+ *	return 0.
+ */
+int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	int ret;
+	int timeout;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct device *dev = info->dev;
+	struct scmi_chan_info *cinfo;
+
+	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
+	if (unlikely(!cinfo))
+		return -EINVAL;
+
+	ret = mbox_send_message(cinfo->chan, xfer);
+	if (ret < 0) {
+		dev_dbg(dev, "mbox send fail %d\n", ret);
+		return ret;
+	}
+
+	/* mbox_send_message returns non-negative value on success, so reset */
+	ret = 0;
+
+	if (xfer->hdr.poll_completion) {
+		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
+
+		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
+
+		if (ktime_before(ktime_get(), stop))
+			scmi_fetch_response(xfer, cinfo->payload);
+		else
+			ret = -ETIMEDOUT;
+	} else {
+		/* And we wait for the response. */
+		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
+				(void *)_RET_IP_);
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	if (!ret && xfer->hdr.status)
+		ret = scmi_to_linux_errno(xfer->hdr.status);
+
+	/*
+	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+	 * transfer queueing since the protocol layer queues things by itself.
+	 * Unfortunately, we have to kick the mailbox framework after we have
+	 * received our message.
+	 */
+	mbox_client_txdone(cinfo->chan, ret);
+
+	return ret;
+}
+
+/**
+ * scmi_xfer_get_init() - Allocate and initialise one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @msg_id: Message identifier
+ * @prot_id: Protocol identifier for the message
+ * @tx_size: transmit message size
+ * @rx_size: receive message size
+ * @p: pointer to the allocated and initialised message
+ *
+ * This function allocates the message using scmi_xfer_get() and
+ * initialises the header.
+ *
+ * Return: 0 if all went fine with @p pointing to message, else
+ *	corresponding error.
+ */
+int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
+		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+{
+	int ret;
+	struct scmi_xfer *xfer;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct device *dev = info->dev;
+
+	/* Ensure we have sane transfer sizes */
+	if (rx_size > info->desc->max_msg_size ||
+	    tx_size > info->desc->max_msg_size)
+		return -ERANGE;
+
+	xfer = scmi_xfer_get(handle);
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "failed to get free message slot(%d)\n", ret);
+		return ret;
+	}
+
+	xfer->tx.len = tx_size;
+	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+	xfer->hdr.id = msg_id;
+	xfer->hdr.protocol_id = prot_id;
+	xfer->hdr.poll_completion = false;
+
+	*p = xfer;
+
+	return 0;
+}
+
+/**
+ * scmi_version_get() - command to get the revision of the SCMI entity
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol: Protocol identifier for the message
+ * @version: Holds returned version of protocol.
+ *
+ * Updates the SCMI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
+		     u32 *version)
+{
+	int ret;
+	__le32 *rev_info;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
+				 sizeof(*version), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		rev_info = t->rx.buf;
+		*version = le32_to_cpu(*rev_info);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+				     u8 *prot_imp)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	info->protocols_imp = prot_imp;
+}
+
+static bool
+scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
+{
+	int i;
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	if (!info->protocols_imp)
+		return false;
+
+	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+		if (info->protocols_imp[i] == prot_id)
+			return true;
+	return false;
+}
+
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+struct scmi_handle *scmi_handle_get(struct device *dev)
+{
+	struct list_head *p;
+	struct scmi_info *info;
+	struct scmi_handle *handle = NULL;
+
+	mutex_lock(&scmi_list_mutex);
+	list_for_each(p, &scmi_list) {
+		info = list_entry(p, struct scmi_info, node);
+		if (dev->parent == info->dev) {
+			handle = &info->handle;
+			info->users++;
+			break;
+		}
+	}
+	mutex_unlock(&scmi_list_mutex);
+
+	return handle;
+}
+
+/**
+ * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ *
+ * @handle: handle acquired by scmi_handle_get
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: 0 if successfully released, -EINVAL if @handle is NULL
+ */
+int scmi_handle_put(const struct scmi_handle *handle)
+{
+	struct scmi_info *info;
+
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_scmi_info(handle);
+	mutex_lock(&scmi_list_mutex);
+	if (!WARN_ON(!info->users))
+		info->users--;
+	mutex_unlock(&scmi_list_mutex);
+
+	return 0;
+}
+
+static const struct scmi_desc scmi_generic_desc = {
+	.max_rx_timeout_ms = 30,	/* We may increase this if required */
+	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
+	.max_msg_size = 128,
+};
+
+/* Each compatible listed below must have a descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
+	{ /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+	int i;
+	struct scmi_xfer *xfer;
+	struct device *dev = sinfo->dev;
+	const struct scmi_desc *desc = sinfo->desc;
+	struct scmi_xfers_info *info = &sinfo->minfo;
+
+	/* Pre-allocated messages, no more than what hdr.seq can support */
+	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+			desc->max_msg, MSG_TOKEN_MAX);
+		return -EINVAL;
+	}
+
+	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
+					sizeof(*info->xfer_block), GFP_KERNEL);
+	if (!info->xfer_block)
+		return -ENOMEM;
+
+	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
+					      sizeof(long), GFP_KERNEL);
+	if (!info->xfer_alloc_table)
+		return -ENOMEM;
+
+	/* Pre-initialize the buffer pointer to pre-allocated buffers */
+	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
+					    GFP_KERNEL);
+		if (!xfer->rx.buf)
+			return -ENOMEM;
+
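+		/* Request-ACK reuses one shmem area, so Tx and Rx share a buffer */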
+		xfer->tx.buf = xfer->rx.buf;
+		init_completion(&xfer->done);
+	}
+
+	spin_lock_init(&info->xfer_lock);
+
+	return 0;
+}
+
+static int scmi_mailbox_check(struct device_node *np)
+{
+	struct of_phandle_args arg;
+
+	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+}
+
+static int scmi_mbox_free_channel(int id, void *p, void *data)
+{
+	struct scmi_chan_info *cinfo = p;
+	struct idr *idr = data;
+
+	if (!IS_ERR_OR_NULL(cinfo->chan)) {
+		mbox_free_channel(cinfo->chan);
+		cinfo->chan = NULL;
+	}
+
+	idr_remove(idr, id);
+
+	return 0;
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct scmi_info *info = platform_get_drvdata(pdev);
+	struct idr *idr = &info->tx_idr;
+
+	mutex_lock(&scmi_list_mutex);
+	if (info->users)
+		ret = -EBUSY;
+	else
+		list_del(&info->node);
+	mutex_unlock(&scmi_list_mutex);
+
+	if (ret)
+		return ret;
+
+	/* Safe to free channels since no more users */
+	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	idr_destroy(&info->tx_idr);
+
+	return ret;
+}
+
+static inline int
+scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+	int ret;
+	struct resource res;
+	resource_size_t size;
+	struct device_node *shmem, *np = dev->of_node;
+	struct scmi_chan_info *cinfo;
+	struct mbox_client *cl;
+
+	if (scmi_mailbox_check(np)) {
+		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+		goto idr_alloc;
+	}
+
+	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	cinfo->dev = dev;
+
+	cl = &cinfo->cl;
+	cl->dev = dev;
+	cl->rx_callback = scmi_rx_callback;
+	cl->tx_prepare = scmi_tx_prepare;
+	cl->tx_block = false;
+	cl->knows_txdone = true;
+
+	shmem = of_parse_phandle(np, "shmem", 0);
+	ret = of_address_to_resource(shmem, 0, &res);
+	of_node_put(shmem);
+	if (ret) {
+		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
+		return ret;
+	}
+
+	size = resource_size(&res);
+	cinfo->payload = devm_ioremap(info->dev, res.start, size);
+	if (!cinfo->payload) {
+		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
+		return -EADDRNOTAVAIL;
+	}
+
+	/* Transmit channel is first entry i.e. index 0 */
+	cinfo->chan = mbox_request_channel(cl, 0);
+	if (IS_ERR(cinfo->chan)) {
+		ret = PTR_ERR(cinfo->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to request SCMI Tx mailbox\n");
+		return ret;
+	}
+
+idr_alloc:
+	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+	if (ret != prot_id) {
+		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+		return ret;
+	}
+
+	cinfo->handle = &info->handle;
+	return 0;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+			    int prot_id)
+{
+	struct scmi_device *sdev;
+
+	sdev = scmi_device_create(np, info->dev, prot_id);
+	if (!sdev) {
+		dev_err(info->dev, "failed to create %d protocol device\n",
+			prot_id);
+		return;
+	}
+
+	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
+		dev_err(&sdev->dev, "failed to setup transport\n");
+		scmi_device_destroy(sdev);
+		return;
+	}
+
+	/* setup handle now as the transport is ready */
+	scmi_set_handle(sdev);
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct scmi_handle *handle;
+	const struct scmi_desc *desc;
+	struct scmi_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *child, *np = dev->of_node;
+
+	/* Only mailbox method supported, check for the presence of one */
+	if (scmi_mailbox_check(np)) {
+		dev_err(dev, "no mailbox found in %pOF\n", np);
+		return -EINVAL;
+	}
+
+	desc = of_match_device(scmi_of_match, dev)->data;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	INIT_LIST_HEAD(&info->node);
+
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, info);
+	idr_init(&info->tx_idr);
+
+	handle = &info->handle;
+	handle->dev = info->dev;
+	handle->version = &info->version;
+
+	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
+	if (ret)
+		return ret;
+
+	ret = scmi_base_protocol_init(handle);
+	if (ret) {
+		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&scmi_list_mutex);
+	list_add_tail(&info->node, &scmi_list);
+	mutex_unlock(&scmi_list_mutex);
+
+	for_each_available_child_of_node(np, child) {
+		u32 prot_id;
+
+		if (of_property_read_u32(child, "reg", &prot_id))
+			continue;
+
+		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+			dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+		if (!scmi_is_protocol_implemented(handle, prot_id)) {
+			dev_err(dev, "SCMI protocol %d not implemented\n",
+				prot_id);
+			continue;
+		}
+
+		scmi_create_protocol_device(child, info, prot_id);
+	}
+
+	return 0;
+}
+
+static struct platform_driver scmi_driver = {
+	.driver = {
+		   .name = "arm-scmi",
+		   .of_match_table = scmi_of_match,
+		   },
+	.probe = scmi_probe,
+	.remove = scmi_remove,
+};
+
+module_platform_driver(scmi_driver);
+
+MODULE_ALIAS("platform: arm-scmi");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
new file mode 100644
index 0000000..6434294
--- /dev/null
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Performance Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+
+#include "common.h"
+
+enum scmi_performance_protocol_cmd {
+	PERF_DOMAIN_ATTRIBUTES = 0x3,
+	PERF_DESCRIBE_LEVELS = 0x4,
+	PERF_LIMITS_SET = 0x5,
+	PERF_LIMITS_GET = 0x6,
+	PERF_LEVEL_SET = 0x7,
+	PERF_LEVEL_GET = 0x8,
+	PERF_NOTIFY_LIMITS = 0x9,
+	PERF_NOTIFY_LEVEL = 0xa,
+};
+
+struct scmi_opp {
+	u32 perf;
+	u32 power;
+	u32 trans_latency_us;
+};
+
+struct scmi_msg_resp_perf_attributes {
+	__le16 num_domains;
+	__le16 flags;
+#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
+	__le32 stats_addr_low;
+	__le32 stats_addr_high;
+	__le32 stats_size;
+};
+
+struct scmi_msg_resp_perf_domain_attributes {
+	__le32 flags;
+#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
+#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
+#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
+#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
+	__le32 rate_limit_us;
+	__le32 sustained_freq_khz;
+	__le32 sustained_perf_level;
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_perf_describe_levels {
+	__le32 domain;
+	__le32 level_index;
+};
+
+struct scmi_perf_set_limits {
+	__le32 domain;
+	__le32 max_level;
+	__le32 min_level;
+};
+
+struct scmi_perf_get_limits {
+	__le32 max_level;
+	__le32 min_level;
+};
+
+struct scmi_perf_set_level {
+	__le32 domain;
+	__le32 level;
+};
+
+struct scmi_perf_notify_level_or_limits {
+	__le32 domain;
+	__le32 notify_enable;
+};
+
+struct scmi_msg_resp_perf_describe_levels {
+	__le16 num_returned;
+	__le16 num_remaining;
+	struct {
+		__le32 perf_val;
+		__le32 power;
+		__le16 transition_latency_us;
+		__le16 reserved;
+	} opp[0];
+};
+
+struct perf_dom_info {
+	bool set_limits;
+	bool set_perf;
+	bool perf_limit_notify;
+	bool perf_level_notify;
+	u32 opp_count;
+	u32 sustained_freq_khz;
+	u32 sustained_perf_level;
+	u32 mult_factor;
+	char name[SCMI_MAX_STR_SIZE];
+	struct scmi_opp opp[MAX_OPPS];
+};
+
+struct scmi_perf_info {
+	int num_domains;
+	bool power_scale_mw;
+	u64 stats_addr;
+	u32 stats_size;
+	struct perf_dom_info *dom_info;
+};
+
+static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+				    struct scmi_perf_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_perf_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u16 flags = le16_to_cpu(attr->flags);
+
+		pi->num_domains = le16_to_cpu(attr->num_domains);
+		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
+		pi->stats_size = le32_to_cpu(attr->stats_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				struct perf_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_perf_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_PERF, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 flags = le32_to_cpu(attr->flags);
+
+		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
+		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
+		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+		dom_info->sustained_freq_khz =
+					le32_to_cpu(attr->sustained_freq_khz);
+		dom_info->sustained_perf_level =
+					le32_to_cpu(attr->sustained_perf_level);
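+		/*
+		 * mult_factor converts a perf level to Hz: e.g. a 2 GHz
+		 * (2000000 kHz) sustained frequency at sustained level 200
+		 * gives 10 MHz per unit of performance level.
+		 */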
+		if (!dom_info->sustained_freq_khz ||
+		    !dom_info->sustained_perf_level)
+			/* CPUFreq converts to kHz, hence default 1000 */
+			dom_info->mult_factor =	1000;
+		else
+			dom_info->mult_factor =
+					(dom_info->sustained_freq_khz * 1000) /
+					dom_info->sustained_perf_level;
+		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+	const struct scmi_opp *t1 = opp1, *t2 = opp2;
+
+	return t1->perf - t2->perf;
+}
+
+static int
+scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+			      struct perf_dom_info *perf_dom)
+{
+	int ret, cnt;
+	u32 tot_opp_cnt = 0;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_opp *opp;
+	struct scmi_msg_perf_describe_levels *dom_info;
+	struct scmi_msg_resp_perf_describe_levels *level_info;
+
+	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
+				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+	if (ret)
+		return ret;
+
+	dom_info = t->tx.buf;
+	level_info = t->rx.buf;
+
+	do {
+		dom_info->domain = cpu_to_le32(domain);
+		/* Set the number of OPPs to be skipped/already read */
+		dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		num_returned = le16_to_cpu(level_info->num_returned);
+		num_remaining = le16_to_cpu(level_info->num_remaining);
+		if (tot_opp_cnt + num_returned > MAX_OPPS) {
+			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
+			break;
+		}
+
+		opp = &perf_dom->opp[tot_opp_cnt];
+		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
+			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
+			opp->power = le32_to_cpu(level_info->opp[cnt].power);
+			opp->trans_latency_us = le16_to_cpu
+				(level_info->opp[cnt].transition_latency_us);
+
+			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
+				opp->perf, opp->power, opp->trans_latency_us);
+		}
+
+		tot_opp_cnt += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	perf_dom->opp_count = tot_opp_cnt;
+	scmi_xfer_put(handle, t);
+
+	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
+	return ret;
+}
+
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+				u32 max_perf, u32 min_perf)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_set_limits *limits;
+
+	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
+				 sizeof(*limits), 0, &t);
+	if (ret)
+		return ret;
+
+	limits = t->tx.buf;
+	limits->domain = cpu_to_le32(domain);
+	limits->max_level = cpu_to_le32(max_perf);
+	limits->min_level = cpu_to_le32(min_perf);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+				u32 *max_perf, u32 *min_perf)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_get_limits *limits;
+
+	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
+				 sizeof(__le32), 0, &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		limits = t->rx.buf;
+
+		*max_perf = le32_to_cpu(limits->max_level);
+		*min_perf = le32_to_cpu(limits->min_level);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+			       u32 level, bool poll)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_perf_set_level *lvl;
+
+	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
+				 sizeof(*lvl), 0, &t);
+	if (ret)
+		return ret;
+
+	t->hdr.poll_completion = poll;
+	lvl = t->tx.buf;
+	lvl->domain = cpu_to_le32(domain);
+	lvl->level = cpu_to_le32(level);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
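+/**
+ * scmi_perf_level_get() - gets the current performance level of a domain.
+ *
+ * @handle: SCMI entity handle
+ * @domain: identifier of the performance domain
+ * @level: location where the current level is stored
+ * @poll: true to poll for completion instead of sleeping on an interrupt
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */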
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+			       u32 *level, bool poll)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
+				 sizeof(u32), sizeof(u32), &t);
+	if (ret)
+		return ret;
+
+	t->hdr.poll_completion = poll;
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		*level = le32_to_cpu(*(__le32 *)t->rx.buf);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+/* Device specific ops */
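+/**
+ * scmi_dev_domain_id() - gets the performance domain of a device from the
+ *	first cell of its "clocks" specifier in the device tree.
+ *
+ * @dev: consumer device
+ *
+ * Return: domain identifier on success, else -EINVAL.
+ */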
+static int scmi_dev_domain_id(struct device *dev)
+{
+	struct of_phandle_args clkspec;
+
+	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+				       0, &clkspec))
+		return -EINVAL;
+
+	return clkspec.args[0];
+}
+
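+/**
+ * scmi_dvfs_device_opps_add() - registers with the OPP library all the OPPs
+ *	discovered for the device's performance domain, removing any entries
+ *	already added if one registration fails.
+ *
+ * @handle: SCMI entity handle
+ * @dev: consumer device
+ *
+ * Return: 0 on success, else appropriate error.
+ */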
+static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
+				     struct device *dev)
+{
+	int idx, ret, domain;
+	unsigned long freq;
+	struct scmi_opp *opp;
+	struct perf_dom_info *dom;
+	struct scmi_perf_info *pi = handle->perf_priv;
+
+	domain = scmi_dev_domain_id(dev);
+	if (domain < 0)
+		return domain;
+
+	dom = pi->dom_info + domain;
+
+	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+		freq = opp->perf * dom->mult_factor;
+
+		ret = dev_pm_opp_add(dev, freq, 0);
+		if (ret) {
+			dev_warn(dev, "failed to add opp %luHz\n", freq);
+
+			while (idx-- > 0) {
+				freq = (--opp)->perf * dom->mult_factor;
+				dev_pm_opp_remove(dev, freq);
+			}
+			return ret;
+		}
+	}
+	return 0;
+}
+
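+/**
+ * scmi_dvfs_transition_latency_get() - gets the transition latency of the
+ *	highest OPP in the device's performance domain, in nanoseconds.
+ *
+ * @handle: SCMI entity handle
+ * @dev: consumer device
+ *
+ * Return: latency in nanoseconds on success, else appropriate error.
+ */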
+static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
+					    struct device *dev)
+{
+	struct perf_dom_info *dom;
+	struct scmi_perf_info *pi = handle->perf_priv;
+	int domain = scmi_dev_domain_id(dev);
+
+	if (domain < 0)
+		return domain;
+
+	dom = pi->dom_info + domain;
+	/* convert microseconds to nanoseconds */
+	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+}
+
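+/**
+ * scmi_dvfs_freq_set() - sets a frequency by converting it to the
+ *	corresponding performance level using the domain's mult_factor.
+ *
+ * @handle: SCMI entity handle
+ * @domain: identifier of the performance domain
+ * @freq: requested frequency in Hz
+ * @poll: true to poll for completion instead of sleeping on an interrupt
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */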
+static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+			      unsigned long freq, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
+				   poll);
+}
+
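+/**
+ * scmi_dvfs_freq_get() - gets the current frequency of a domain by reading
+ *	its performance level and scaling it by the domain's mult_factor.
+ *
+ * @handle: SCMI entity handle
+ * @domain: identifier of the performance domain
+ * @freq: location where the frequency in Hz is stored
+ * @poll: true to poll for completion instead of sleeping on an interrupt
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */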
+static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+			      unsigned long *freq, bool poll)
+{
+	int ret;
+	u32 level;
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	ret = scmi_perf_level_get(handle, domain, &level, poll);
+	if (!ret)
+		*freq = level * dom->mult_factor;
+
+	return ret;
+}
+
+static struct scmi_perf_ops perf_ops = {
+	.limits_set = scmi_perf_limits_set,
+	.limits_get = scmi_perf_limits_get,
+	.level_set = scmi_perf_level_set,
+	.level_get = scmi_perf_level_get,
+	.device_domain_id = scmi_dev_domain_id,
+	.transition_latency_get = scmi_dvfs_transition_latency_get,
+	.device_opps_add = scmi_dvfs_device_opps_add,
+	.freq_set = scmi_dvfs_freq_set,
+	.freq_get = scmi_dvfs_freq_get,
+};
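+
+/*
+ * Consumers reach these operations through the SCMI handle. A minimal
+ * sketch, assuming a hypothetical cpufreq driver holding a CPU's
+ * struct device *cpu_dev (error handling omitted):
+ *
+ *	int domain = handle->perf_ops->device_domain_id(cpu_dev);
+ *
+ *	handle->perf_ops->freq_set(handle, domain, 1200000000UL, false);
+ */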
+
+static int scmi_perf_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_perf_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+
+	dev_dbg(handle->dev, "Performance Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_perf_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct perf_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_perf_domain_attributes_get(handle, domain, dom);
+		scmi_perf_describe_levels_get(handle, domain, dom);
+	}
+
+	handle->perf_ops = &perf_ops;
+	handle->perf_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_perf_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
+				      &scmi_perf_protocol_init);
+}
+subsys_initcall(scmi_perf_init);
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
new file mode 100644
index 0000000..cfa033b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/power.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Power Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_power_protocol_cmd {
+	POWER_DOMAIN_ATTRIBUTES = 0x3,
+	POWER_STATE_SET = 0x4,
+	POWER_STATE_GET = 0x5,
+	POWER_STATE_NOTIFY = 0x6,
+};
+
+struct scmi_msg_resp_power_attributes {
+	__le16 num_domains;
+	__le16 reserved;
+	__le32 stats_addr_low;
+	__le32 stats_addr_high;
+	__le32 stats_size;
+};
+
+struct scmi_msg_resp_power_domain_attributes {
+	__le32 flags;
+#define SUPPORTS_STATE_SET_NOTIFY(x)	((x) & BIT(31))
+#define SUPPORTS_STATE_SET_ASYNC(x)	((x) & BIT(30))
+#define SUPPORTS_STATE_SET_SYNC(x)	((x) & BIT(29))
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_set_state {
+	__le32 flags;
+#define STATE_SET_ASYNC		BIT(0)
+	__le32 domain;
+	__le32 state;
+};
+
+struct scmi_power_state_notify {
+	__le32 domain;
+	__le32 notify_enable;
+};
+
+struct power_dom_info {
+	bool state_set_sync;
+	bool state_set_async;
+	bool state_set_notify;
+	char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_info {
+	int num_domains;
+	u64 stats_addr;
+	u32 stats_size;
+	struct power_dom_info *dom_info;
+};
+
+static int scmi_power_attributes_get(const struct scmi_handle *handle,
+				     struct scmi_power_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_power_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		pi->num_domains = le16_to_cpu(attr->num_domains);
+		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
+		pi->stats_size = le32_to_cpu(attr->stats_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				 struct power_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_power_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_POWER, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 flags = le32_to_cpu(attr->flags);
+
+		dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+		dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
+		dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
+		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
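+/**
+ * scmi_power_state_set() - requests a power state change for a domain.
+ *
+ * @handle: SCMI entity handle
+ * @domain: identifier of the power domain
+ * @state: requested power state
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */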
+static int
+scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_power_set_state *st;
+
+	ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
+				 sizeof(*st), 0, &t);
+	if (ret)
+		return ret;
+
+	st = t->tx.buf;
+	st->flags = cpu_to_le32(0);
+	st->domain = cpu_to_le32(domain);
+	st->state = cpu_to_le32(state);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
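+/**
+ * scmi_power_state_get() - gets the current power state of a domain.
+ *
+ * @handle: SCMI entity handle
+ * @domain: identifier of the power domain
+ * @state: location where the current power state is stored
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */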
+static int
+scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
+{
+	int ret;
+	struct scmi_xfer *t;
+
+	ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
+				 sizeof(u32), sizeof(u32), &t);
+	if (ret)
+		return ret;
+
+	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret)
+		*state = le32_to_cpu(*(__le32 *)t->rx.buf);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_power_num_domains_get(const struct scmi_handle *handle)
+{
+	struct scmi_power_info *pi = handle->power_priv;
+
+	return pi->num_domains;
+}
+
+static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_power_info *pi = handle->power_priv;
+	struct power_dom_info *dom = pi->dom_info + domain;
+
+	return dom->name;
+}
+
+static struct scmi_power_ops power_ops = {
+	.num_domains_get = scmi_power_num_domains_get,
+	.name_get = scmi_power_name_get,
+	.state_set = scmi_power_state_set,
+	.state_get = scmi_power_state_get,
+};
+
+static int scmi_power_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_power_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
+
+	dev_dbg(handle->dev, "Power Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_power_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct power_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_power_domain_attributes_get(handle, domain, dom);
+	}
+
+	handle->power_ops = &power_ops;
+	handle->power_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_power_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_POWER,
+				      &scmi_power_protocol_init);
+}
+subsys_initcall(scmi_power_init);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
new file mode 100644
index 0000000..87f737e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic power domain support.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/scmi_protocol.h>
+
+struct scmi_pm_domain {
+	struct generic_pm_domain genpd;
+	const struct scmi_handle *handle;
+	const char *name;
+	u32 domain;
+};
+
+#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
+
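+/*
+ * Set the requested generic ON/OFF state, then read the state back so that
+ * firmware which accepts the command but leaves the domain unchanged is
+ * reported as -EIO rather than silently ignored.
+ */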
+static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+	int ret;
+	u32 state, ret_state;
+	struct scmi_pm_domain *pd = to_scmi_pd(domain);
+	const struct scmi_power_ops *ops = pd->handle->power_ops;
+
+	if (power_on)
+		state = SCMI_POWER_STATE_GENERIC_ON;
+	else
+		state = SCMI_POWER_STATE_GENERIC_OFF;
+
+	ret = ops->state_set(pd->handle, pd->domain, state);
+	if (!ret)
+		ret = ops->state_get(pd->handle, pd->domain, &ret_state);
+	if (!ret && state != ret_state)
+		return -EIO;
+
+	return ret;
+}
+
+static int scmi_pd_power_on(struct generic_pm_domain *domain)
+{
+	return scmi_pd_power(domain, true);
+}
+
+static int scmi_pd_power_off(struct generic_pm_domain *domain)
+{
+	return scmi_pd_power(domain, false);
+}
+
+static int scmi_pm_domain_probe(struct scmi_device *sdev)
+{
+	int num_domains, i;
+	struct device *dev = &sdev->dev;
+	struct device_node *np = dev->of_node;
+	struct scmi_pm_domain *scmi_pd;
+	struct genpd_onecell_data *scmi_pd_data;
+	struct generic_pm_domain **domains;
+	const struct scmi_handle *handle = sdev->handle;
+
+	if (!handle || !handle->power_ops)
+		return -ENODEV;
+
+	num_domains = handle->power_ops->num_domains_get(handle);
+	if (num_domains < 0) {
+		dev_err(dev, "number of domains not found\n");
+		return num_domains;
+	}
+
+	scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
+	if (!scmi_pd)
+		return -ENOMEM;
+
+	scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
+	if (!scmi_pd_data)
+		return -ENOMEM;
+
+	domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+	if (!domains)
+		return -ENOMEM;
+
+	for (i = 0; i < num_domains; i++, scmi_pd++) {
+		u32 state;
+
+		domains[i] = &scmi_pd->genpd;
+
+		scmi_pd->domain = i;
+		scmi_pd->handle = handle;
+		scmi_pd->name = handle->power_ops->name_get(handle, i);
+		scmi_pd->genpd.name = scmi_pd->name;
+		scmi_pd->genpd.power_off = scmi_pd_power_off;
+		scmi_pd->genpd.power_on = scmi_pd_power_on;
+
+		if (handle->power_ops->state_get(handle, i, &state)) {
+			dev_warn(dev, "failed to get state for domain %d\n", i);
+			continue;
+		}
+
+		pm_genpd_init(&scmi_pd->genpd, NULL,
+			      state == SCMI_POWER_STATE_GENERIC_OFF);
+	}
+
+	scmi_pd_data->domains = domains;
+	scmi_pd_data->num_domains = num_domains;
+
+	return of_genpd_add_provider_onecell(np, scmi_pd_data);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+	{ SCMI_PROTOCOL_POWER },
+	{ },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_power_domain_driver = {
+	.name = "scmi-power-domain",
+	.probe = scmi_pm_domain_probe,
+	.id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
new file mode 100644
index 0000000..27f2092
--- /dev/null
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Sensor Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_sensor_protocol_cmd {
+	SENSOR_DESCRIPTION_GET = 0x3,
+	SENSOR_CONFIG_SET = 0x4,
+	SENSOR_TRIP_POINT_SET = 0x5,
+	SENSOR_READING_GET = 0x6,
+};
+
+struct scmi_msg_resp_sensor_attributes {
+	__le16 num_sensors;
+	u8 max_requests;
+	u8 reserved;
+	__le32 reg_addr_low;
+	__le32 reg_addr_high;
+	__le32 reg_size;
+};
+
+struct scmi_msg_resp_sensor_description {
+	__le16 num_returned;
+	__le16 num_remaining;
+	struct {
+		__le32 id;
+		__le32 attributes_low;
+#define SUPPORTS_ASYNC_READ(x)	((x) & BIT(31))
+#define NUM_TRIP_POINTS(x)	((x) & 0xff)
+		__le32 attributes_high;
+#define SENSOR_TYPE(x)		((x) & 0xff)
+#define SENSOR_SCALE(x)		(((x) >> 11) & 0x3f)
+#define SENSOR_UPDATE_SCALE(x)	(((x) >> 22) & 0x1f)
+#define SENSOR_UPDATE_BASE(x)	(((x) >> 27) & 0x1f)
+		u8 name[SCMI_MAX_STR_SIZE];
+	} desc[0];
+};
+
+struct scmi_msg_set_sensor_config {
+	__le32 id;
+	__le32 event_control;
+};
+
+struct scmi_msg_set_sensor_trip_point {
+	__le32 id;
+	__le32 event_control;
+#define SENSOR_TP_EVENT_MASK	(0x3)
+#define SENSOR_TP_DISABLED	0x0
+#define SENSOR_TP_POSITIVE	0x1
+#define SENSOR_TP_NEGATIVE	0x2
+#define SENSOR_TP_BOTH		0x3
+#define SENSOR_TP_ID(x)		(((x) & 0xff) << 4)
+	__le32 value_low;
+	__le32 value_high;
+};
+
+struct scmi_msg_sensor_reading_get {
+	__le32 id;
+	__le32 flags;
+#define SENSOR_READ_ASYNC	BIT(0)
+};
+
+struct sensors_info {
+	int num_sensors;
+	int max_requests;
+	u64 reg_addr;
+	u32 reg_size;
+	struct scmi_sensor_info *sensors;
+};
+
+static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
+				      struct sensors_info *si)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_sensor_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		si->num_sensors = le16_to_cpu(attr->num_sensors);
+		si->max_requests = attr->max_requests;
+		si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
+				(u64)le32_to_cpu(attr->reg_addr_high) << 32;
+		si->reg_size = le32_to_cpu(attr->reg_size);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
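+/**
+ * scmi_sensor_description_get() - retrieves the descriptors of all sensors,
+ *	issuing SENSOR_DESCRIPTION_GET with an increasing index until the
+ *	platform reports no descriptors remaining.
+ *
+ * @handle: SCMI entity handle
+ * @si: sensor information structure whose sensors array is populated
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */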
+static int scmi_sensor_description_get(const struct scmi_handle *handle,
+				       struct sensors_info *si)
+{
+	int ret, cnt;
+	u32 desc_index = 0;
+	u16 num_returned, num_remaining;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_sensor_description *buf;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
+	if (ret)
+		return ret;
+
+	buf = t->rx.buf;
+
+	do {
+		/* Set the number of sensors to be skipped/already read */
+		*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+
+		ret = scmi_do_xfer(handle, t);
+		if (ret)
+			break;
+
+		num_returned = le16_to_cpu(buf->num_returned);
+		num_remaining = le16_to_cpu(buf->num_remaining);
+
+		if (desc_index + num_returned > si->num_sensors) {
+			dev_err(handle->dev, "No. of sensors can't exceed %d\n",
+				si->num_sensors);
+			break;
+		}
+
+		for (cnt = 0; cnt < num_returned; cnt++) {
+			u32 attrh;
+			struct scmi_sensor_info *s;
+
+			attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
+			s = &si->sensors[desc_index + cnt];
+			s->id = le32_to_cpu(buf->desc[cnt].id);
+			s->type = SENSOR_TYPE(attrh);
+			memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+		}
+
+		desc_index += num_returned;
+		/*
+		 * check for both returned and remaining to avoid infinite
+		 * loop due to buggy firmware
+		 */
+	} while (num_returned && num_remaining);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
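+/**
+ * scmi_sensor_configuration_set() - enables event generation for a sensor
+ *	by setting bit 0 of its event control word.
+ *
+ * @handle: SCMI entity handle
+ * @sensor_id: identifier of the sensor
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */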
+static int
+scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+{
+	int ret;
+	u32 evt_cntl = BIT(0);
+	struct scmi_xfer *t;
+	struct scmi_msg_set_sensor_config *cfg;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
+	if (ret)
+		return ret;
+
+	cfg = t->tx.buf;
+	cfg->id = cpu_to_le32(sensor_id);
+	cfg->event_control = cpu_to_le32(evt_cntl);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
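+/**
+ * scmi_sensor_trip_point_set() - configures a trip point on a sensor to
+ *	generate events on crossings in both directions.
+ *
+ * @handle: SCMI entity handle
+ * @sensor_id: identifier of the sensor
+ * @trip_id: identifier of the trip point within the sensor
+ * @trip_value: 64-bit trip value, sent as low/high 32-bit words
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */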
+static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
+				      u32 sensor_id, u8 trip_id, u64 trip_value)
+{
+	int ret;
+	u32 evt_cntl = SENSOR_TP_BOTH;
+	struct scmi_xfer *t;
+	struct scmi_msg_set_sensor_trip_point *trip;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
+	if (ret)
+		return ret;
+
+	trip = t->tx.buf;
+	trip->id = cpu_to_le32(sensor_id);
+	trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
+	trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
+	trip->value_high = cpu_to_le32(trip_value >> 32);
+
+	ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
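+/**
+ * scmi_sensor_reading_get() - reads the current 64-bit value of a sensor.
+ *
+ * @handle: SCMI entity handle
+ * @sensor_id: identifier of the sensor
+ * @async: true to request an asynchronous reading
+ * @value: location where the sensor value is stored
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */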
+static int scmi_sensor_reading_get(const struct scmi_handle *handle,
+				   u32 sensor_id, bool async, u64 *value)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_sensor_reading_get *sensor;
+
+	ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+				 SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
+				 sizeof(u64), &t);
+	if (ret)
+		return ret;
+
+	sensor = t->tx.buf;
+	sensor->id = cpu_to_le32(sensor_id);
+	sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		__le32 *pval = t->rx.buf;
+
+		*value = le32_to_cpu(*pval);
+		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static const struct scmi_sensor_info *
+scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
+{
+	struct sensors_info *si = handle->sensor_priv;
+
+	return si->sensors + sensor_id;
+}
+
+static int scmi_sensor_count_get(const struct scmi_handle *handle)
+{
+	struct sensors_info *si = handle->sensor_priv;
+
+	return si->num_sensors;
+}
+
+static struct scmi_sensor_ops sensor_ops = {
+	.count_get = scmi_sensor_count_get,
+	.info_get = scmi_sensor_info_get,
+	.configuration_set = scmi_sensor_configuration_set,
+	.trip_point_set = scmi_sensor_trip_point_set,
+	.reading_get = scmi_sensor_reading_get,
+};
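+
+/*
+ * A minimal sketch of how a consumer (e.g. a hypothetical hwmon driver)
+ * might use these operations through the SCMI handle (error handling
+ * omitted):
+ *
+ *	u64 val;
+ *	const struct scmi_sensor_info *s;
+ *
+ *	s = handle->sensor_ops->info_get(handle, 0);
+ *	handle->sensor_ops->reading_get(handle, s->id, false, &val);
+ */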
+
+static int scmi_sensors_protocol_init(struct scmi_handle *handle)
+{
+	u32 version;
+	struct sensors_info *sinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
+
+	dev_dbg(handle->dev, "Sensor Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
+	if (!sinfo)
+		return -ENOMEM;
+
+	scmi_sensor_attributes_get(handle, sinfo);
+
+	sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
+				      sizeof(*sinfo->sensors), GFP_KERNEL);
+	if (!sinfo->sensors)
+		return -ENOMEM;
+
+	scmi_sensor_description_get(handle, sinfo);
+
+	handle->sensor_ops = &sensor_ops;
+	handle->sensor_priv = sinfo;
+
+	return 0;
+}
+
+static int __init scmi_sensors_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_SENSOR,
+				      &scmi_sensors_protocol_init);
+}
+subsys_initcall(scmi_sensors_init);