v4.19.13 snapshot.
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 0000000..ba79b60
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,166 @@
+#
+# QCOM Soc drivers
+#
+menu "Qualcomm SoC drivers"
+
+config QCOM_COMMAND_DB
+	bool "Qualcomm Command DB"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on OF_RESERVED_MEM
+	help
+	  Command DB queries shared memory by key string for shared system
+	  resources. Platform drivers that need to set the state of a shared
+	  resource on an RPM-hardened platform must use this database to get
+	  the SoC-specific identifiers and information for the shared resources.
+
+config QCOM_GENI_SE
+	tristate "QCOM GENI Serial Engine Driver"
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  This driver is used to manage Generic Interface (GENI) firmware based
+	  Qualcomm Technologies, Inc. Universal Peripheral (QUP) Wrapper. This
+	  driver is also used to manage the common aspects of multiple Serial
+	  Engines present in the QUP.
+
+config QCOM_GLINK_SSR
+	tristate "Qualcomm Glink SSR driver"
+	depends on RPMSG
+	depends on QCOM_RPROC_COMMON
+	help
+	  Say y here to enable GLINK SSR support. The GLINK SSR driver
+	  implements the SSR protocol for notifying the remote processor about
+	  neighboring subsystems going up or down.
+
+config QCOM_GSBI
+	tristate "QCOM General Serial Bus Interface"
+	depends on ARCH_QCOM
+	select MFD_SYSCON
+	help
+	  Say y here to enable GSBI support.  The GSBI provides control
+	  functions for connecting the underlying serial UART, SPI, and I2C
+	  devices to the output pins.
+
+config QCOM_LLCC
+	tristate "Qualcomm Technologies, Inc. LLCC driver"
+	depends on ARCH_QCOM
+	help
+	  Qualcomm Technologies, Inc. platform specific
+	  Last Level Cache Controller (LLCC) driver. This provides interfaces
+	  to clients that use the LLCC. Say yes here to enable LLCC slice
+	  driver.
+
+config QCOM_SDM845_LLCC
+	tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
+	depends on QCOM_LLCC
+	help
+	  Say yes here to enable the LLCC driver for SDM845. This provides
+	  data required to configure LLCC so that clients can start using the
+	  LLCC slices.
+
+config QCOM_MDT_LOADER
+	tristate
+	select QCOM_SCM
+
+config QCOM_PM
+	bool "Qualcomm Power Management"
+	depends on ARCH_QCOM && !ARM64
+	select ARM_CPU_SUSPEND
+	select QCOM_SCM
+	help
+	  QCOM Platform specific power driver to manage cores and L2 low power
+	  modes. It interfaces with various system drivers to put the cores in
+	  low power modes.
+
+config QCOM_QMI_HELPERS
+	tristate
+	depends on ARCH_QCOM && NET
+	help
+	  Helper library for handling QMI encoded messages.  QMI encoded
+	  messages are used in communication between the majority of QRTR
+	  clients, and these helpers provide the common functionality needed
+	  for doing this from a kernel driver.
+
+config QCOM_RMTFS_MEM
+	tristate "Qualcomm Remote Filesystem memory driver"
+	depends on ARCH_QCOM
+	select QCOM_SCM
+	help
+	  The Qualcomm remote filesystem memory driver is used for allocating
+	  and exposing regions of shared memory with remote processors for the
+	  purpose of exchanging sector-data between the remote filesystem
+	  service and its clients.
+
+	  Say y here if you intend to boot the modem remoteproc.
+
+config QCOM_RPMH
+	bool "Qualcomm RPM-Hardened (RPMH) Communication"
+	depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+	help
+	  Support for communication with the hardened-RPM blocks in
+	  Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+	  internal bus to transmit state requests for shared resources. A set
+	  of hardware components aggregate requests for these resources and
+	  help apply the aggregated state on the resource.
+
+config QCOM_SMEM
+	tristate "Qualcomm Shared Memory Manager (SMEM)"
+	depends on ARCH_QCOM
+	depends on HWSPINLOCK
+	help
+	  Say y here to enable support for the Qualcomm Shared Memory Manager.
+	  The driver provides an interface to items in a heap shared among all
+	  processors in a Qualcomm platform.
+
+config QCOM_SMD_RPM
+	tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+	depends on ARCH_QCOM
+	depends on RPMSG && OF
+	help
+	  If you say yes to this option, support will be included for the
+	  Resource Power Manager system found in the Qualcomm 8974 based
+	  devices.
+
+	  This is required to access many regulators, clocks and bus
+	  frequencies controlled by the RPM on these devices.
+
+	  Say M here if you want to include support for the Qualcomm RPM as a
+	  module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM_STATE
+	bool
+
+config QCOM_SMP2P
+	tristate "Qualcomm Shared Memory Point to Point support"
+	depends on MAILBOX
+	depends on QCOM_SMEM
+	select QCOM_SMEM_STATE
+	help
+	  Say yes here to support the Qualcomm Shared Memory Point to Point
+	  protocol.
+
+config QCOM_SMSM
+	tristate "Qualcomm Shared Memory State Machine"
+	depends on QCOM_SMEM
+	select QCOM_SMEM_STATE
+	help
+	  Say yes here to support the Qualcomm Shared Memory State Machine.
+	  The state machine is represented by bits in shared memory.
+
+config QCOM_WCNSS_CTRL
+	tristate "Qualcomm WCNSS control driver"
+	depends on ARCH_QCOM
+	depends on RPMSG
+	help
+	  Client driver for the WCNSS_CTRL SMD channel, used to download nv
+	  firmware to a newly booted WCNSS chip.
+
+config QCOM_APR
+	tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
+	depends on ARCH_QCOM
+	depends on RPMSG
+	help
+	  Enable APR IPC protocol support between the application processor
+	  and QDSP6. APR is used by the audio driver to configure the QDSP6
+	  ASM, ADM and AFE modules.
+endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 0000000..f25b54c
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_GENI_SE) +=	qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GLINK_SSR) +=	glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER)	+= mdt_loader.o
+obj-$(CONFIG_QCOM_PM)	+=	spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
+qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH)		+= qcom_rpmh.o
+qcom_rpmh-y			+= rpmh-rsc.o
+qcom_rpmh-y			+= rpmh.o
+obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
+obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
new file mode 100644
index 0000000..57af8a5
--- /dev/null
+++ b/drivers/soc/qcom/apr.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+struct apr {
+	struct rpmsg_endpoint *ch;
+	struct device *dev;
+	spinlock_t svcs_lock;
+	struct idr svcs_idr;
+	int dest_domain_id;
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to previously registered apr device.
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: packet size on success, a negative errno on failure.
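+ *
+ * A minimal usage sketch; @adev and a populated @pkt (header field and
+ * pkt_size filled in by the caller) are assumed to already exist:
+ *
+ *	ret = apr_send_pkt(adev, pkt);
+ *	if (ret < 0)
+ *		dev_err(&adev->dev, "failed to send APR packet: %d\n", ret);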
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+{
+	struct apr *apr = dev_get_drvdata(adev->dev.parent);
+	struct apr_hdr *hdr;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->lock, flags);
+
+	hdr = &pkt->hdr;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->src_svc = adev->svc_id;
+	hdr->dest_domain = adev->domain_id;
+	hdr->dest_svc = adev->svc_id;
+
+	ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+	spin_unlock_irqrestore(&adev->lock, flags);
+
+	return ret ? ret : hdr->pkt_size;
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+	struct apr_device *adev = to_apr_device(dev);
+
+	kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+				  int len, void *priv, u32 addr)
+{
+	struct apr *apr = dev_get_drvdata(&rpdev->dev);
+	uint16_t hdr_size, msg_type, ver, svc_id;
+	struct apr_device *svc = NULL;
+	struct apr_driver *adrv = NULL;
+	struct apr_resp_pkt resp;
+	struct apr_hdr *hdr;
+	unsigned long flags;
+
+	if (len <= APR_HDR_SIZE) {
+		dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+			buf, len);
+		return -EINVAL;
+	}
+
+	hdr = buf;
+	ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+	if (ver > APR_PKT_VER + 1)
+		return -EINVAL;
+
+	hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+	if (hdr_size < APR_HDR_SIZE) {
+		dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+		return -EINVAL;
+	}
+
+	if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+		dev_err(apr->dev, "APR: Wrong packet size\n");
+		return -EINVAL;
+	}
+
+	msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+	if (msg_type >= APR_MSG_TYPE_MAX) {
+		dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+		return -EINVAL;
+	}
+
+	if (hdr->src_domain >= APR_DOMAIN_MAX ||
+			hdr->dest_domain >= APR_DOMAIN_MAX ||
+			hdr->src_svc >= APR_SVC_MAX ||
+			hdr->dest_svc >= APR_SVC_MAX) {
+		dev_err(apr->dev, "APR: Wrong APR header\n");
+		return -EINVAL;
+	}
+
+	svc_id = hdr->dest_svc;
+	spin_lock_irqsave(&apr->svcs_lock, flags);
+	svc = idr_find(&apr->svcs_idr, svc_id);
+	if (svc && svc->dev.driver)
+		adrv = to_apr_driver(svc->dev.driver);
+	spin_unlock_irqrestore(&apr->svcs_lock, flags);
+
+	if (!adrv) {
+		dev_err(apr->dev, "APR: service is not registered\n");
+		return -EINVAL;
+	}
+
+	resp.hdr = *hdr;
+	resp.payload_size = hdr->pkt_size - hdr_size;
+
+	/*
+	 * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can
+	 * include optional headers in apr_hdr which should be ignored
+	 */
+	if (resp.payload_size > 0)
+		resp.payload = buf + hdr_size;
+
+	adrv->callback(svc, &resp);
+
+	return 0;
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct apr_device *adev = to_apr_device(dev);
+	struct apr_driver *adrv = to_apr_driver(drv);
+	const struct apr_device_id *id = adrv->id_table;
+
+	/* Attempt an OF style match first */
+	if (of_driver_match_device(dev, drv))
+		return 1;
+
+	if (!id)
+		return 0;
+
+	while (id->domain_id != 0 || id->svc_id != 0) {
+		if (id->domain_id == adev->domain_id &&
+		    id->svc_id == adev->svc_id)
+			return 1;
+		id++;
+	}
+
+	return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+	struct apr_device *adev = to_apr_device(dev);
+	struct apr_driver *adrv = to_apr_driver(dev->driver);
+
+	return adrv->probe(adev);
+}
+
+static int apr_device_remove(struct device *dev)
+{
+	struct apr_device *adev = to_apr_device(dev);
+	struct apr_driver *adrv;
+	struct apr *apr = dev_get_drvdata(adev->dev.parent);
+
+	if (dev->driver) {
+		adrv = to_apr_driver(dev->driver);
+		if (adrv->remove)
+			adrv->remove(adev);
+		spin_lock(&apr->svcs_lock);
+		idr_remove(&apr->svcs_idr, adev->svc_id);
+		spin_unlock(&apr->svcs_lock);
+	}
+
+	return 0;
+}
+
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct apr_device *adev = to_apr_device(dev);
+	int ret;
+
+	ret = of_device_uevent_modalias(dev, env);
+	if (ret != -ENODEV)
+		return ret;
+
+	return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+	.name		= "aprbus",
+	.match		= apr_device_match,
+	.probe		= apr_device_probe,
+	.uevent		= apr_uevent,
+	.remove		= apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+			  const struct apr_device_id *id)
+{
+	struct apr *apr = dev_get_drvdata(dev);
+	struct apr_device *adev = NULL;
+	int ret;
+
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return -ENOMEM;
+
+	spin_lock_init(&adev->lock);
+
+	adev->svc_id = id->svc_id;
+	adev->domain_id = id->domain_id;
+	adev->version = id->svc_version;
+	if (np)
+		strncpy(adev->name, np->name, APR_NAME_SIZE);
+	else
+		strncpy(adev->name, id->name, APR_NAME_SIZE);
+
+	dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+		     id->domain_id, id->svc_id);
+
+	adev->dev.bus = &aprbus;
+	adev->dev.parent = dev;
+	adev->dev.of_node = np;
+	adev->dev.release = apr_dev_release;
+	adev->dev.driver = NULL;
+
+	spin_lock(&apr->svcs_lock);
+	idr_alloc(&apr->svcs_idr, adev, id->svc_id,
+		  id->svc_id + 1, GFP_ATOMIC);
+	spin_unlock(&apr->svcs_lock);
+
+	dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+
+	ret = device_register(&adev->dev);
+	if (ret) {
+		dev_err(dev, "device_register failed: %d\n", ret);
+		put_device(&adev->dev);
+	}
+
+	return ret;
+}
+
+static void of_register_apr_devices(struct device *dev)
+{
+	struct apr *apr = dev_get_drvdata(dev);
+	struct device_node *node;
+
+	for_each_child_of_node(dev->of_node, node) {
+		struct apr_device_id id = { {0} };
+
+		if (of_property_read_u32(node, "reg", &id.svc_id))
+			continue;
+
+		id.domain_id = apr->dest_domain_id;
+
+		if (apr_add_device(dev, node, &id))
+			dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+	}
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+	struct device *dev = &rpdev->dev;
+	struct apr *apr;
+	int ret;
+
+	apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+	if (!apr)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+	if (ret) {
+		dev_err(dev, "APR Domain ID not specified in DT\n");
+		return ret;
+	}
+
+	dev_set_drvdata(dev, apr);
+	apr->ch = rpdev->ept;
+	apr->dev = dev;
+	spin_lock_init(&apr->svcs_lock);
+	idr_init(&apr->svcs_idr);
+	of_register_apr_devices(dev);
+
+	return 0;
+}
+
+static int apr_remove_device(struct device *dev, void *null)
+{
+	struct apr_device *adev = to_apr_device(dev);
+
+	device_unregister(&adev->dev);
+
+	return 0;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+	device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with the client device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus. It is called
+ * from the driver's module-init function.
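+ *
+ * A typical client registers from its module init path. The sketch below
+ * assumes a hypothetical "my_apr_driver" and the apr_driver_register()
+ * convenience wrapper declared in <linux/soc/qcom/apr.h>:
+ *
+ *	static int __init my_init(void)
+ *	{
+ *		return apr_driver_register(&my_apr_driver);
+ *	}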
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+	drv->driver.bus = &aprbus;
+	drv->driver.owner = owner;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id apr_of_match[] = {
+	{ .compatible = "qcom,apr"},
+	{ .compatible = "qcom,apr-v2"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, apr_of_match);
+
+static struct rpmsg_driver apr_driver = {
+	.probe = apr_probe,
+	.remove = apr_remove,
+	.callback = apr_callback,
+	.drv = {
+		.name = "qcom,apr",
+		.of_match_table = apr_of_match,
+	},
+};
+
+static int __init apr_init(void)
+{
+	int ret;
+
+	ret = bus_register(&aprbus);
+	if (ret)
+		return ret;
+
+	ret = register_rpmsg_driver(&apr_driver);
+	if (ret)
+		bus_unregister(&aprbus);
+
+	return ret;
+}
+
+static void __exit apr_exit(void)
+{
+	unregister_rpmsg_driver(&apr_driver);
+	bus_unregister(&aprbus);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
new file mode 100644
index 0000000..a6f6462
--- /dev/null
+++ b/drivers/soc/qcom/cmd-db.c
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <soc/qcom/cmd-db.h>
+
+#define NUM_PRIORITY		2
+#define MAX_SLV_ID		8
+#define SLAVE_ID_MASK		0x7
+#define SLAVE_ID_SHIFT		16
+
+/**
+ * struct entry_header: header for each entry in cmddb
+ *
+ * @id: resource's identifier
+ * @priority: unused
+ * @addr: the address of the resource
+ * @len: length of the data
+ * @offset: offset from @data_offset to the start of the data
+ */
+struct entry_header {
+	u8 id[8];
+	__le32 priority[NUM_PRIORITY];
+	__le32 addr;
+	__le16 len;
+	__le16 offset;
+};
+
+/**
+ * struct rsc_hdr: resource header information
+ *
+ * @slv_id: id for the resource
+ * @header_offset: entry's header at offset from the end of the cmd_db_header
+ * @data_offset: entry's data at offset from the end of the cmd_db_header
+ * @cnt: number of entries for HW type
+ * @version: MSB is major, LSB is minor
+ * @reserved: reserved for future use.
+ */
+struct rsc_hdr {
+	__le16 slv_id;
+	__le16 header_offset;
+	__le16 data_offset;
+	__le16 cnt;
+	__le16 version;
+	__le16 reserved[3];
+};
+
+/**
+ * struct cmd_db_header: The DB header information
+ *
+ * @version: The cmd db version
+ * @magic: constant expected in the database
+ * @header: array of resources
+ * @checksum: checksum for the header. Unused.
+ * @reserved: reserved memory
+ * @data: driver specific data
+ */
+struct cmd_db_header {
+	__le32 version;
+	u8 magic[4];
+	struct rsc_hdr header[MAX_SLV_ID];
+	__le32 checksum;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
+ * DOC: Description of the Command DB database.
+ *
+ * At the start of the command DB memory is the cmd_db_header structure.
+ * The cmd_db_header holds the version, checksum and magic key, as well as an
+ * array of headers, one for each slave (described by struct rsc_hdr). Each h/w
+ * based accelerator is a 'slave' (shared resource) and has a slave id
+ * indicating the type of accelerator. The rsc_hdr is the header for the
+ * individual slaves of a given type. The entries for each of these slaves
+ * begin at rsc_hdr.header_offset. In addition, each slave could have auxiliary
+ * data that may be needed by the driver. The data for the slave starts at
+ * entry_header.offset from the location pointed to by rsc_hdr.data_offset.
+ *
+ * Drivers have a stringified key to a slave/resource. They can query the slave
+ * information and get the slave id, the auxiliary data and the length of that
+ * data. Using this information, they can format the request to be sent to the
+ * h/w accelerator and request a resource state.
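+ *
+ * A minimal client-side sketch (the resource name "foo" is illustrative
+ * only; real names are platform specific):
+ *
+ *	ret = cmd_db_ready();
+ *	if (ret)
+ *		return ret;
+ *
+ *	addr = cmd_db_read_addr("foo");
+ *	if (!addr)
+ *		return -EINVAL;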
+ */
+
+static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
+
+static bool cmd_db_magic_matches(const struct cmd_db_header *header)
+{
+	const u8 *magic = header->magic;
+
+	return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
+}
+
+static struct cmd_db_header *cmd_db_header;
+
+
+static inline void *rsc_to_entry_header(struct rsc_hdr *hdr)
+{
+	u16 offset = le16_to_cpu(hdr->header_offset);
+
+	return cmd_db_header->data + offset;
+}
+
+static inline void *
+rsc_offset(struct rsc_hdr *hdr, struct entry_header *ent)
+{
+	u16 offset = le16_to_cpu(hdr->data_offset);
+	u16 loffset = le16_to_cpu(ent->offset);
+
+	return cmd_db_header->data + offset + loffset;
+}
+
+/**
+ * cmd_db_ready - Indicates if command DB is available
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int cmd_db_ready(void)
+{
+	if (cmd_db_header == NULL)
+		return -EPROBE_DEFER;
+	else if (!cmd_db_magic_matches(cmd_db_header))
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(cmd_db_ready);
+
+static int cmd_db_get_header(const char *id, struct entry_header *eh,
+			     struct rsc_hdr *rh)
+{
+	struct rsc_hdr *rsc_hdr;
+	struct entry_header *ent;
+	int ret, i, j;
+	u8 query[8];
+
+	ret = cmd_db_ready();
+	if (ret)
+		return ret;
+
+	if (!eh || !rh)
+		return -EINVAL;
+
+	/* Pad out query string to same length as in DB */
+	strncpy(query, id, sizeof(query));
+
+	for (i = 0; i < MAX_SLV_ID; i++) {
+		rsc_hdr = &cmd_db_header->header[i];
+		if (!rsc_hdr->slv_id)
+			break;
+
+		ent = rsc_to_entry_header(rsc_hdr);
+		for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
+			if (memcmp(ent->id, query, sizeof(ent->id)) == 0)
+				break;
+		}
+
+		if (j < le16_to_cpu(rsc_hdr->cnt)) {
+			memcpy(eh, ent, sizeof(*ent));
+			memcpy(rh, rsc_hdr, sizeof(*rh));
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * cmd_db_read_addr() - Query command db for resource id address.
+ *
+ * @id: resource id to query for address
+ *
+ * Return: resource address on success, 0 on error
+ *
+ * This is used to retrieve resource address based on resource
+ * id.
+ */
+u32 cmd_db_read_addr(const char *id)
+{
+	int ret;
+	struct entry_header ent;
+	struct rsc_hdr rsc_hdr;
+
+	ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+	return ret < 0 ? 0 : le32_to_cpu(ent.addr);
+}
+EXPORT_SYMBOL(cmd_db_read_addr);
+
+/**
+ * cmd_db_read_aux_data() - Query command db for aux data.
+ *
+ *  @id: Resource to retrieve AUX Data on.
+ *  @data: Data buffer to copy the returned aux data into. Must not be NULL.
+ *  @len: Caller provides size of data buffer passed in.
+ *
+ *  Return: size of data on success, errno otherwise
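+ *
+ *  A sketch of the usual two-step query (buffer and resource name are
+ *  illustrative):
+ *
+ *	len = cmd_db_read_aux_data_len("foo");
+ *	if (len > 0)
+ *		ret = cmd_db_read_aux_data("foo", buf, len);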
+ */
+int cmd_db_read_aux_data(const char *id, u8 *data, size_t len)
+{
+	int ret;
+	struct entry_header ent;
+	struct rsc_hdr rsc_hdr;
+	u16 ent_len;
+
+	if (!data)
+		return -EINVAL;
+
+	ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+	if (ret)
+		return ret;
+
+	ent_len = le16_to_cpu(ent.len);
+	if (len < ent_len)
+		return -EINVAL;
+
+	len = min_t(u16, ent_len, len);
+	memcpy(data, rsc_offset(&rsc_hdr, &ent), len);
+
+	return len;
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_read_aux_data_len - Get the length of the auxiliary data stored in DB.
+ *
+ * @id: Resource to retrieve AUX Data.
+ *
+ * Return: size on success, 0 on error
+ */
+size_t cmd_db_read_aux_data_len(const char *id)
+{
+	int ret;
+	struct entry_header ent;
+	struct rsc_hdr rsc_hdr;
+
+	ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+	return ret < 0 ? 0 : le16_to_cpu(ent.len);
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data_len);
+
+/**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource
+ *
+ * @id: Resource id to query the DB for the slave id
+ *
+ * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
+ */
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
+{
+	int ret;
+	struct entry_header ent;
+	struct rsc_hdr rsc_hdr;
+	u32 addr;
+
+	ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+	if (ret < 0)
+		return CMD_DB_HW_INVALID;
+
+	addr = le32_to_cpu(ent.addr);
+	return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+}
+EXPORT_SYMBOL(cmd_db_read_slave_id);
+
+static int cmd_db_dev_probe(struct platform_device *pdev)
+{
+	struct reserved_mem *rmem;
+
+	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+	if (!rmem) {
+		dev_err(&pdev->dev, "failed to acquire memory region\n");
+		return -EINVAL;
+	}
+
+	cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+	if (!cmd_db_header) {
+		dev_err(&pdev->dev, "failed to map shared memory\n");
+		return -ENOMEM;
+	}
+
+	if (!cmd_db_magic_matches(cmd_db_header)) {
+		dev_err(&pdev->dev, "Invalid Command DB Magic\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id cmd_db_match_table[] = {
+	{ .compatible = "qcom,cmd-db" },
+	{ },
+};
+
+static struct platform_driver cmd_db_dev_driver = {
+	.probe  = cmd_db_dev_probe,
+	.driver = {
+		   .name = "cmd-db",
+		   .of_match_table = cmd_db_match_table,
+	},
+};
+
+static int __init cmd_db_device_init(void)
+{
+	return platform_driver_register(&cmd_db_dev_driver);
+}
+arch_initcall(cmd_db_device_init);
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
new file mode 100644
index 0000000..19c7399
--- /dev/null
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/rpmsg.h>
+#include <linux/remoteproc/qcom_rproc.h>
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * @version:     The G-Link SSR protocol version
+ * @command:     The G-Link SSR command - do_cleanup
+ * @seq_num:     Sequence number
+ * @name_len:    Length of the name of the subsystem being restarted
+ * @name:        G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+	__le32 version;
+	__le32 command;
+	__le32 seq_num;
+	__le32 name_len;
+	char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * @version:     The G-Link SSR protocol version
+ * @response:    The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num:     Sequence number
+ */
+struct cleanup_done_msg {
+	__le32 version;
+	__le32 response;
+	__le32 seq_num;
+};
+
+/*
+ * G-Link SSR protocol commands
+ */
+#define GLINK_SSR_DO_CLEANUP	0
+#define GLINK_SSR_CLEANUP_DONE	1
+
+struct glink_ssr {
+	struct device *dev;
+	struct rpmsg_endpoint *ept;
+
+	struct notifier_block nb;
+
+	u32 seq_num;
+	struct completion completion;
+};
+
+static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
+				   void *data, int len, void *priv, u32 addr)
+{
+	struct cleanup_done_msg *msg = data;
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+	if (len < sizeof(*msg)) {
+		dev_err(ssr->dev, "message too short\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(msg->version) != 0)
+		return -EINVAL;
+
+	if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
+		return 0;
+
+	if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
+		dev_err(ssr->dev, "invalid sequence number of response\n");
+		return -EINVAL;
+	}
+
+	complete(&ssr->completion);
+
+	return 0;
+}
+
+static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event,
+				 void *data)
+{
+	struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
+	struct do_cleanup_msg msg;
+	char *ssr_name = data;
+	int ret;
+
+	ssr->seq_num++;
+	reinit_completion(&ssr->completion);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
+	msg.seq_num = cpu_to_le32(ssr->seq_num);
+	msg.name_len = cpu_to_le32(strlen(ssr_name));
+	strlcpy(msg.name, ssr_name, sizeof(msg.name));
+
+	ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
+	if (ret < 0)
+		dev_err(ssr->dev, "failed to send cleanup message\n");
+
+	ret = wait_for_completion_timeout(&ssr->completion, HZ);
+	if (!ret)
+		dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
+
+	return NOTIFY_DONE;
+}
+
+static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
+{
+	struct glink_ssr *ssr;
+
+	ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
+	if (!ssr)
+		return -ENOMEM;
+
+	init_completion(&ssr->completion);
+
+	ssr->dev = &rpdev->dev;
+	ssr->ept = rpdev->ept;
+	ssr->nb.notifier_call = qcom_glink_ssr_notify;
+
+	dev_set_drvdata(&rpdev->dev, ssr);
+
+	return qcom_register_ssr_notifier(&ssr->nb);
+}
+
+static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
+{
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+	qcom_unregister_ssr_notifier(&ssr->nb);
+}
+
+static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
+	{ "glink_ssr" },
+	{}
+};
+
+static struct rpmsg_driver qcom_glink_ssr_driver = {
+	.probe = qcom_glink_ssr_probe,
+	.remove = qcom_glink_ssr_remove,
+	.callback = qcom_glink_ssr_callback,
+	.id_table = qcom_glink_ssr_match,
+	.drv = {
+		.name = "qcom_glink_ssr",
+	},
+};
+module_rpmsg_driver(qcom_glink_ssr_driver);
+
+MODULE_ALIAS("rpmsg:glink_ssr");
+MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
new file mode 100644
index 0000000..2e1e4f0
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * An SCT (System Cache Table) entry consists of the following members:
+ * usecase_id: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Boolean indicating if the slice has a fixed capacity
+ * bonus_ways: Bonus ways are additional ways to be used for any slice,
+ *		if client ends up using more than reserved cache ways. Bonus
+ *		ways are allocated only if they are not reserved for some
+ *		other client.
+ * res_ways: Reserved ways for the cache slice, the reserved ways cannot
+ *		be used by any other client than the one it's assigned to.
+ * cache_mode: Each slice operates as a cache, this controls the mode of the
+ *             slice: normal or TCM(Tightly Coupled Memory)
+ * probe_target_ways: Determines what ways to probe for access hit. When
+ *                    configured to 1 only bonus and reserved ways are probed.
+ *                    When configured to 0 all ways in llcc are probed.
+ * dis_cap_alloc: Disable capacity based allocation for a client
+ * retain_on_pc: If this bit is set and client has maintained active vote
+ *               then the ways assigned to this client are not flushed on power
+ *               collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
+ */
+#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+	{					\
+		.usecase_id = uid,		\
+		.slice_id = sid,		\
+		.max_cap = mc,			\
+		.priority = p,			\
+		.fixed_size = fs,		\
+		.bonus_ways = bway,		\
+		.res_ways = rway,		\
+		.cache_mode = cmod,		\
+		.probe_target_ways = ptw,	\
+		.dis_cap_alloc = dca,		\
+		.retain_on_pc = rp,		\
+		.activate_on_init = a,		\
+	}
+
+static struct llcc_slice_config sdm845_data[] =  {
+	SCT_ENTRY(LLCC_CPUSS,    1,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 1),
+	SCT_ENTRY(LLCC_VIDSC0,   2,  512,  2, 1, 0x0,   0x0f0, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_VIDSC1,   3,  512,  2, 1, 0x0,   0x0f0, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_ROTATOR,  4,  563,  2, 1, 0x0,   0x00e, 2, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_VOICE,    5,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_AUDIO,    6,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_MDMHPGRW, 7,  1024, 2, 0, 0xfc,  0xf00, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_MDM,      8,  2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_CMPT,     10, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_GPUHTW,   11, 512,  1, 1, 0xc,   0x0,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_GPU,      12, 2304, 1, 0, 0xff0, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_MMUHWT,   13, 256,  2, 0, 0x0,   0x1,   0, 0, 1, 0, 1),
+	SCT_ENTRY(LLCC_CMPTDMA,  15, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_DISP,     16, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_VIDFW,    17, 2816, 1, 0, 0xffc, 0x2,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_MDMHPFX,  20, 1024, 2, 1, 0x0,   0xf00, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_MDMPNG,   21, 1024, 0, 1, 0x1e,  0x0,   0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0xffc, 0x2,   0, 0, 1, 1, 0),
+};
+
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
+{
+	return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
+}
+
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+	{ .compatible = "qcom,sdm845-llcc", },
+	{ }
+};
+
+static struct platform_driver sdm845_qcom_llcc_driver = {
+	.driver = {
+		.name = "sdm845-llcc",
+		.of_match_table = sdm845_qcom_llcc_of_match,
+	},
+	.probe = sdm845_qcom_llcc_probe,
+};
+module_platform_driver(sdm845_qcom_llcc_driver);
+
+MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 0000000..54063a3
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE                      BIT(0)
+#define DEACTIVATE                    BIT(1)
+#define ACT_CTRL_OPCODE_ACTIVATE      BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE    BIT(1)
+#define ACT_CTRL_ACT_TRIG             BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT         0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT        0x03
+#define ATTR1_PRIORITY_SHIFT          0x04
+#define ATTR1_MAX_CAP_SHIFT           0x10
+#define ATTR0_RES_WAYS_MASK           GENMASK(11, 0)
+#define ATTR0_BONUS_WAYS_MASK         GENMASK(27, 16)
+#define ATTR0_BONUS_WAYS_SHIFT        0x10
+#define LLCC_STATUS_READ_DELAY        100
+
+#define CACHE_LINE_SIZE_SHIFT         6
+
+#define LLCC_COMMON_STATUS0           0x0003000c
+#define LLCC_LB_CNT_MASK              GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT             28
+
+#define MAX_CAP_TO_BYTES(n)           (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n)         (n * SZ_4K)
+#define LLCC_TRP_STATUSn(n)           (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n)        (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n)        (0x21004 + SZ_8 * n)
+
+#define BANK_OFFSET_STRIDE	      0x80000
+
+static struct llcc_drv_data *drv_data;
+
+static const struct regmap_config llcc_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.fast_io = true,
+};
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor will be returned on success and
+ * an error pointer is returned on failure
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+	const struct llcc_slice_config *cfg;
+	struct llcc_slice_desc *desc;
+	u32 sz, count;
+
+	cfg = drv_data->cfg;
+	sz = drv_data->cfg_size;
+
+	for (count = 0; cfg && count < sz; count++, cfg++)
+		if (cfg->usecase_id == uid)
+			break;
+
+	if (count == sz || !cfg)
+		return ERR_PTR(-ENODEV);
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	desc->slice_id = cfg->slice_id;
+	desc->slice_size = cfg->max_cap;
+
+	return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - release the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+	kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+				u32 act_ctrl_reg_val, u32 status)
+{
+	u32 act_ctrl_reg;
+	u32 status_reg;
+	u32 slice_status;
+	int ret;
+
+	act_ctrl_reg = drv_data->bcast_off + LLCC_TRP_ACT_CTRLn(sid);
+	status_reg = drv_data->bcast_off + LLCC_TRP_STATUSn(sid);
+
+	/* Set the ACTIVE trigger */
+	act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+	ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+	if (ret)
+		return ret;
+
+	/* Clear the ACTIVE trigger */
+	act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+	ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+	if (ret)
+		return ret;
+
+	ret = regmap_read_poll_timeout(drv_data->regmap, status_reg,
+				      slice_status, !(slice_status & status),
+				      0, LLCC_STATUS_READ_DELAY);
+	return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
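+ *
+ * A typical client sequence (sketch; LLCC_GPU is one example usecase id and
+ * error handling is trimmed):
+ *
+ *	struct llcc_slice_desc *desc = llcc_slice_getd(LLCC_GPU);
+ *
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *	ret = llcc_slice_activate(desc);
+ *
+ * and, when the slice is no longer needed:
+ *
+ *	llcc_slice_deactivate(desc);
+ *	llcc_slice_putd(desc);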
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+	int ret;
+	u32 act_ctrl_val;
+
+	mutex_lock(&drv_data->lock);
+	if (test_bit(desc->slice_id, drv_data->bitmap)) {
+		mutex_unlock(&drv_data->lock);
+		return 0;
+	}
+
+	act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+	ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+				  DEACTIVATE);
+	if (ret) {
+		mutex_unlock(&drv_data->lock);
+		return ret;
+	}
+
+	__set_bit(desc->slice_id, drv_data->bitmap);
+	mutex_unlock(&drv_data->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+	u32 act_ctrl_val;
+	int ret;
+
+	mutex_lock(&drv_data->lock);
+	if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+		mutex_unlock(&drv_data->lock);
+		return 0;
+	}
+	act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+	ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+				  ACTIVATE);
+	if (ret) {
+		mutex_unlock(&drv_data->lock);
+		return ret;
+	}
+
+	__clear_bit(desc->slice_id, drv_data->bitmap);
+	mutex_unlock(&drv_data->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+	return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+	return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev)
+{
+	int i;
+	u32 attr1_cfg;
+	u32 attr0_cfg;
+	u32 attr1_val;
+	u32 attr0_val;
+	u32 max_cap_cacheline;
+	u32 sz;
+	int ret;
+	const struct llcc_slice_config *llcc_table;
+	struct llcc_slice_desc desc;
+	u32 bcast_off = drv_data->bcast_off;
+
+	sz = drv_data->cfg_size;
+	llcc_table = drv_data->cfg;
+
+	for (i = 0; i < sz; i++) {
+		attr1_cfg = bcast_off +
+				LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+		attr0_cfg = bcast_off +
+				LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+
+		attr1_val = llcc_table[i].cache_mode;
+		attr1_val |= llcc_table[i].probe_target_ways <<
+				ATTR1_PROBE_TARGET_WAYS_SHIFT;
+		attr1_val |= llcc_table[i].fixed_size <<
+				ATTR1_FIXED_SIZE_SHIFT;
+		attr1_val |= llcc_table[i].priority <<
+				ATTR1_PRIORITY_SHIFT;
+
+		max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
+
+		/*
+		 * LLCC instances can vary for each target.
+		 * The SW writes to the broadcast register which gets
+		 * propagated to each llcc instance (llcc0,.. llccN).
+		 * Since the size of the memory is divided equally amongst the
+		 * llcc instances, we need to configure the max cap accordingly.
+		 */
+		max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+		max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+		attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+		attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
+		attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+		ret = regmap_write(drv_data->regmap, attr1_cfg, attr1_val);
+		if (ret)
+			return ret;
+		ret = regmap_write(drv_data->regmap, attr0_cfg, attr0_val);
+		if (ret)
+			return ret;
+		if (llcc_table[i].activate_on_init) {
+			desc.slice_id = llcc_table[i].slice_id;
+			ret = llcc_slice_activate(&desc);
+		}
+	}
+	return ret;
+}
+
+int qcom_llcc_probe(struct platform_device *pdev,
+		      const struct llcc_slice_config *llcc_cfg, u32 sz)
+{
+	u32 num_banks;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *base;
+	int ret, i;
+
+	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	drv_data->regmap = devm_regmap_init_mmio(dev, base,
+					&llcc_regmap_config);
+	if (IS_ERR(drv_data->regmap))
+		return PTR_ERR(drv_data->regmap);
+
+	ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
+						&num_banks);
+	if (ret)
+		return ret;
+
+	num_banks &= LLCC_LB_CNT_MASK;
+	num_banks >>= LLCC_LB_CNT_SHIFT;
+	drv_data->num_banks = num_banks;
+
+	for (i = 0; i < sz; i++)
+		if (llcc_cfg[i].slice_id > drv_data->max_slices)
+			drv_data->max_slices = llcc_cfg[i].slice_id;
+
+	drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+							GFP_KERNEL);
+	if (!drv_data->offsets)
+		return -ENOMEM;
+
+	for (i = 0; i < num_banks; i++)
+		drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+
+	drv_data->bcast_off = num_banks * BANK_OFFSET_STRIDE;
+
+	drv_data->bitmap = devm_kcalloc(dev,
+					BITS_TO_LONGS(drv_data->max_slices),
+					sizeof(unsigned long), GFP_KERNEL);
+	if (!drv_data->bitmap)
+		return -ENOMEM;
+
+	drv_data->cfg = llcc_cfg;
+	drv_data->cfg_size = sz;
+	mutex_init(&drv_data->lock);
+	platform_set_drvdata(pdev, drv_data);
+
+	return qcom_llcc_cfg_program(pdev);
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
new file mode 100644
index 0000000..1c48802
--- /dev/null
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -0,0 +1,245 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+	if (phdr->p_type != PT_LOAD)
+		return false;
+
+	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+		return false;
+
+	if (!phdr->p_memsz)
+		return false;
+
+	return true;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw:		firmware object for the mdt file
+ *
+ * Returns size of the loaded firmware blob, or -EINVAL on failure.
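+ *
+ * A short usage sketch (the firmware object @fw is assumed to have been
+ * obtained with request_firmware()):
+ *
+ *	fw_size = qcom_mdt_get_size(fw);
+ *	if (fw_size < 0)
+ *		return fw_size;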
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	const struct elf32_hdr *ehdr;
+	phys_addr_t min_addr = PHYS_ADDR_MAX;
+	phys_addr_t max_addr = 0;
+	int i;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		if (phdr->p_paddr < min_addr)
+			min_addr = phdr->p_paddr;
+
+		if (phdr->p_paddr + phdr->p_memsz > max_addr)
+			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+	}
+
+	return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+			   const char *firmware, int pas_id, void *mem_region,
+			   phys_addr_t mem_phys, size_t mem_size,
+			   phys_addr_t *reloc_base, bool pas_init)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	const struct elf32_hdr *ehdr;
+	const struct firmware *seg_fw;
+	phys_addr_t mem_reloc;
+	phys_addr_t min_addr = PHYS_ADDR_MAX;
+	phys_addr_t max_addr = 0;
+	size_t fw_name_len;
+	ssize_t offset;
+	char *fw_name;
+	bool relocate = false;
+	void *ptr;
+	int ret;
+	int i;
+
+	if (!fw || !mem_region || !mem_phys || !mem_size)
+		return -EINVAL;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+	fw_name_len = strlen(firmware);
+	if (fw_name_len <= 4)
+		return -EINVAL;
+
+	fw_name = kstrdup(firmware, GFP_KERNEL);
+	if (!fw_name)
+		return -ENOMEM;
+
+	if (pas_init) {
+		ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size);
+		if (ret) {
+			dev_err(dev, "invalid firmware metadata\n");
+			goto out;
+		}
+	}
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+			relocate = true;
+
+		if (phdr->p_paddr < min_addr)
+			min_addr = phdr->p_paddr;
+
+		if (phdr->p_paddr + phdr->p_memsz > max_addr)
+			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+	}
+
+	if (relocate) {
+		if (pas_init) {
+			ret = qcom_scm_pas_mem_setup(pas_id, mem_phys,
+						     max_addr - min_addr);
+			if (ret) {
+				dev_err(dev, "unable to setup relocation\n");
+				goto out;
+			}
+		}
+
+		/*
+		 * The image is relocatable, so offset each segment based on
+		 * the lowest segment address.
+		 */
+		mem_reloc = min_addr;
+	} else {
+		/*
+		 * Image is not relocatable, so offset each segment based on
+		 * the allocated physical chunk of memory.
+		 */
+		mem_reloc = mem_phys;
+	}
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		offset = phdr->p_paddr - mem_reloc;
+		if (offset < 0 || offset + phdr->p_memsz > mem_size) {
+			dev_err(dev, "segment outside memory range\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ptr = mem_region + offset;
+
+		if (phdr->p_filesz) {
+			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+			ret = request_firmware_into_buf(&seg_fw, fw_name, dev,
+							ptr, phdr->p_filesz);
+			if (ret) {
+				dev_err(dev, "failed to load %s\n", fw_name);
+				break;
+			}
+
+			release_firmware(seg_fw);
+		}
+
+		if (phdr->p_memsz > phdr->p_filesz)
+			memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+	}
+
+	if (reloc_base)
+		*reloc_base = mem_reloc;
+
+out:
+	kfree(fw_name);
+
+	return ret;
+}
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is loaded as fw
+ * @dev:	device handle to associate resources with
+ * @fw:		firmware object for the mdt file
+ * @firmware:	name of the firmware, for construction of segment file names
+ * @pas_id:	PAS identifier
+ * @mem_region:	allocated memory region to load firmware into
+ * @mem_phys:	physical address of allocated memory region
+ * @mem_size:	size of the allocated memory region
+ * @reloc_base:	adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
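+ *
+ * A remoteproc-style usage sketch; the firmware name, PAS id and the
+ * pre-allocated memory region parameters are illustrative:
+ *
+ *	ret = request_firmware(&fw, "modem.mdt", dev);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = qcom_mdt_load(dev, fw, "modem.mdt", pas_id, mem_region,
+ *			    mem_phys, mem_size, &reloc_base);
+ *	release_firmware(fw);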
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+		  const char *firmware, int pas_id, void *mem_region,
+		  phys_addr_t mem_phys, size_t mem_size,
+		  phys_addr_t *reloc_base)
+{
+	return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+			       mem_size, reloc_base, true);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
+
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as fw
+ * @dev:	device handle to associate resources with
+ * @fw:		firmware object for the mdt file
+ * @firmware:	name of the firmware, for construction of segment file names
+ * @pas_id:	PAS identifier
+ * @mem_region:	allocated memory region to load firmware into
+ * @mem_phys:	physical address of allocated memory region
+ * @mem_size:	size of the allocated memory region
+ * @reloc_base:	adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+			  const char *firmware, int pas_id,
+			  void *mem_region, phys_addr_t mem_phys,
+			  size_t mem_size, phys_addr_t *reloc_base)
+{
+	return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+			       mem_size, reloc_base, false);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
new file mode 100644
index 0000000..feed3db
--- /dev/null
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+
+/**
+ * DOC: Overview
+ *
+ * Generic Interface (GENI) Serial Engine (SE) Wrapper driver is introduced
+ * to manage GENI firmware based Qualcomm Universal Peripheral (QUP) Wrapper
+ * controller. QUP Wrapper is designed to support various serial bus protocols
+ * like UART, SPI, I2C, I3C, etc.
+ */
+
+/**
+ * DOC: Hardware description
+ *
+ * GENI based QUP is a highly-flexible and programmable module for supporting
+ * a wide range of serial interfaces like UART, SPI, I2C, I3C, etc. A single
+ * QUP module can provide up to 8 serial interfaces, using its internal
+ * serial engines. The actual configuration is determined by the target
+ * platform configuration. The protocol supported by each interface is
+ * determined by the firmware loaded to the serial engine. Each SE consists
+ * of a DMA Engine and GENI sub modules which enable serial engines to
+ * support FIFO and DMA modes of operation.
+ *
+ *
+ *                      +-----------------------------------------+
+ *                      |QUP Wrapper                              |
+ *                      |         +----------------------------+  |
+ *   --QUP & SE Clocks-->         | Serial Engine N            |  +-IO------>
+ *                      |         | ...                        |  | Interface
+ *   <---Clock Perf.----+    +----+-----------------------+    |  |
+ *     State Interface  |    | Serial Engine 1            |    |  |
+ *                      |    |                            |    |  |
+ *                      |    |                            |    |  |
+ *   <--------AHB------->    |                            |    |  |
+ *                      |    |                            +----+  |
+ *                      |    |                            |       |
+ *                      |    |                            |       |
+ *   <------SE IRQ------+    +----------------------------+       |
+ *                      |                                         |
+ *                      +-----------------------------------------+
+ *
+ *                         Figure 1: GENI based QUP Wrapper
+ *
+ * The GENI submodules include primary and secondary sequencers which are
+ * used to drive TX & RX operations. On serial interfaces that operate using
+ * master-slave model, primary sequencer drives both TX & RX operations. On
+ * serial interfaces that operate using peer-to-peer model, primary sequencer
+ * drives TX operation and secondary sequencer drives RX operation.
+ */
+
+/**
+ * DOC: Software description
+ *
+ * GENI SE Wrapper driver is structured into 2 parts:
+ *
+ * geni_wrapper represents QUP Wrapper controller. This part of the driver
+ * manages QUP Wrapper information such as hardware version, clock
+ * performance table that is common to all the internal serial engines.
+ *
+ * geni_se represents serial engine. This part of the driver manages serial
+ * engine information such as clocks, containing QUP Wrapper, etc. This part
+ * of driver also supports operations (eg. initialize the concerned serial
+ * engine, select between FIFO and DMA mode of operation etc.) that are
+ * common to all the serial engines and are independent of serial interfaces.
+ */
+
+#define MAX_CLK_PERF_LEVEL 32
+#define NUM_AHB_CLKS 2
+
+/**
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * @dev:		Device pointer of the QUP wrapper core
+ * @base:		Base address of this instance of QUP wrapper core
+ * @ahb_clks:		Handle to the primary & secondary AHB clocks
+ */
+struct geni_wrapper {
+	struct device *dev;
+	void __iomem *base;
+	struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+};
+
+#define QUP_HW_VER_REG			0x4
+
+/* Common SE registers */
+#define GENI_INIT_CFG_REVISION		0x0
+#define GENI_S_INIT_CFG_REVISION	0x4
+#define GENI_OUTPUT_CTRL		0x24
+#define GENI_CGC_CTRL			0x28
+#define GENI_CLK_CTRL_RO		0x60
+#define GENI_IF_DISABLE_RO		0x64
+#define GENI_FW_S_REVISION_RO		0x6c
+#define SE_GENI_BYTE_GRAN		0x254
+#define SE_GENI_TX_PACKING_CFG0		0x260
+#define SE_GENI_TX_PACKING_CFG1		0x264
+#define SE_GENI_RX_PACKING_CFG0		0x284
+#define SE_GENI_RX_PACKING_CFG1		0x288
+#define SE_GENI_M_GP_LENGTH		0x910
+#define SE_GENI_S_GP_LENGTH		0x914
+#define SE_DMA_TX_PTR_L			0xc30
+#define SE_DMA_TX_PTR_H			0xc34
+#define SE_DMA_TX_ATTR			0xc38
+#define SE_DMA_TX_LEN			0xc3c
+#define SE_DMA_TX_IRQ_EN		0xc48
+#define SE_DMA_TX_IRQ_EN_SET		0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR		0xc50
+#define SE_DMA_TX_LEN_IN		0xc54
+#define SE_DMA_TX_MAX_BURST		0xc5c
+#define SE_DMA_RX_PTR_L			0xd30
+#define SE_DMA_RX_PTR_H			0xd34
+#define SE_DMA_RX_ATTR			0xd38
+#define SE_DMA_RX_LEN			0xd3c
+#define SE_DMA_RX_IRQ_EN		0xd48
+#define SE_DMA_RX_IRQ_EN_SET		0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR		0xd50
+#define SE_DMA_RX_LEN_IN		0xd54
+#define SE_DMA_RX_MAX_BURST		0xd5c
+#define SE_DMA_RX_FLUSH			0xd60
+#define SE_GSI_EVENT_EN			0xe18
+#define SE_IRQ_EN			0xe1c
+#define SE_DMA_GENERAL_CFG		0xe30
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK	GENMASK(6, 0)
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON		BIT(0)
+#define CFG_AHB_WR_ACLK_CGC_ON		BIT(1)
+#define DATA_AHB_CLK_CGC_ON		BIT(2)
+#define SCLK_CGC_ON			BIT(3)
+#define TX_CLK_CGC_ON			BIT(4)
+#define RX_CLK_CGC_ON			BIT(5)
+#define EXT_CLK_CGC_ON			BIT(6)
+#define PROG_RAM_HCLK_OFF		BIT(8)
+#define PROG_RAM_SCLK_OFF		BIT(9)
+#define DEFAULT_CGC_EN			GENMASK(6, 0)
+
+/* SE_GSI_EVENT_EN fields */
+#define DMA_RX_EVENT_EN			BIT(0)
+#define DMA_TX_EVENT_EN			BIT(1)
+#define GENI_M_EVENT_EN			BIT(2)
+#define GENI_S_EVENT_EN			BIT(3)
+
+/* SE_IRQ_EN fields */
+#define DMA_RX_IRQ_EN			BIT(0)
+#define DMA_TX_IRQ_EN			BIT(1)
+#define GENI_M_IRQ_EN			BIT(2)
+#define GENI_S_IRQ_EN			BIT(3)
+
+/* SE_DMA_GENERAL_CFG */
+#define DMA_RX_CLK_CGC_ON		BIT(0)
+#define DMA_TX_CLK_CGC_ON		BIT(1)
+#define DMA_AHB_SLV_CFG_ON		BIT(2)
+#define AHB_SEC_SLV_CLK_CGC_ON		BIT(3)
+#define DUMMY_RX_NON_BUFFERABLE		BIT(4)
+#define RX_DMA_ZERO_PADDING_EN		BIT(5)
+#define RX_DMA_IRQ_DELAY_MSK		GENMASK(8, 6)
+#define RX_DMA_IRQ_DELAY_SHFT		6
+
+/**
+ * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
+ * @se:	Pointer to the corresponding serial engine.
+ *
+ * Return: Hardware Version of the wrapper.
+ */
+u32 geni_se_get_qup_hw_version(struct geni_se *se)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+
+	return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+}
+EXPORT_SYMBOL(geni_se_get_qup_hw_version);
+
+static void geni_se_io_set_mode(void __iomem *base)
+{
+	u32 val;
+
+	val = readl_relaxed(base + SE_IRQ_EN);
+	val |= GENI_M_IRQ_EN | GENI_S_IRQ_EN;
+	val |= DMA_TX_IRQ_EN | DMA_RX_IRQ_EN;
+	writel_relaxed(val, base + SE_IRQ_EN);
+
+	val = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
+	val &= ~GENI_DMA_MODE_EN;
+	writel_relaxed(val, base + SE_GENI_DMA_MODE_EN);
+
+	writel_relaxed(0, base + SE_GSI_EVENT_EN);
+}
+
+static void geni_se_io_init(void __iomem *base)
+{
+	u32 val;
+
+	val = readl_relaxed(base + GENI_CGC_CTRL);
+	val |= DEFAULT_CGC_EN;
+	writel_relaxed(val, base + GENI_CGC_CTRL);
+
+	val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
+	val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+	val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+	writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
+
+	writel_relaxed(DEFAULT_IO_OUTPUT_CTRL_MSK, base + GENI_OUTPUT_CTRL);
+	writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI serial engine
+ * @se:		Pointer to the concerned serial engine.
+ * @rx_wm:	Receive watermark, in units of FIFO words.
+ * @rx_rfr:	Ready-for-receive watermark, in units of FIFO words.
+ *
+ * This function is used to initialize the GENI serial engine and to configure
+ * the receive watermark and ready-for-receive watermark.
+ */
+void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
+{
+	u32 val;
+
+	geni_se_io_init(se->base);
+	geni_se_io_set_mode(se->base);
+
+	writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG);
+	writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+	val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+	val |= M_COMMON_GENI_M_IRQ_EN;
+	writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+	val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+	val |= S_COMMON_GENI_S_IRQ_EN;
+	writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static void geni_se_select_fifo_mode(struct geni_se *se)
+{
+	u32 proto = geni_se_read_proto(se);
+	u32 val;
+
+	writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+	writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+	writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+	writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+	writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+	writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+
+	val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+	if (proto != GENI_SE_UART) {
+		val |= M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN;
+		val |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+	}
+	writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+	val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+	if (proto != GENI_SE_UART)
+		val |= S_CMD_DONE_EN;
+	writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+
+	val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+	val &= ~GENI_DMA_MODE_EN;
+	writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_dma_mode(struct geni_se *se)
+{
+	u32 val;
+
+	writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+	writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+	writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+	writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+	writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+	writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+
+	val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+	val |= GENI_DMA_MODE_EN;
+	writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @se:		Pointer to the concerned serial engine.
+ * @mode:	Transfer mode to be selected.
+ */
+void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+	WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA);
+
+	switch (mode) {
+	case GENI_SE_FIFO:
+		geni_se_select_fifo_mode(se);
+		break;
+	case GENI_SE_DMA:
+		geni_se_select_dma_mode(se);
+		break;
+	case GENI_SE_INVALID:
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(geni_se_select_mode);
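As a rough illustration of how a protocol driver is expected to use the calls above, the sketch below strings together geni_se_resources_on(), geni_se_init() and geni_se_select_mode() in their usual order, assuming the declarations from include/linux/qcom-geni-se.h. The function name and the watermark values (1 word to raise the RX interrupt, 15 words for ready-for-receive) are invented for the example; real drivers derive them from the FIFO depth reported by the hardware.

static int example_se_port_setup(struct geni_se *se)
{
	int ret;

	/* Pinctrl default state plus wrapper AHB and SE core clocks */
	ret = geni_se_resources_on(se);
	if (ret)
		return ret;

	/* Hypothetical watermarks, in FIFO words */
	geni_se_init(se, 1, 15);

	/* Run transfers through the programmed-I/O FIFOs */
	geni_se_select_mode(se, GENI_SE_FIFO);

	return 0;
}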
+
+/**
+ * DOC: Overview
+ *
+ * GENI FIFO packing is highly configurable. TX/RX packing/unpacking consists
+ * of up to 4 operations, each operation described by a 10-bit configuration
+ * vector. The four vectors are programmed in GENI_TX_PACKING_CFG0 and
+ * GENI_TX_PACKING_CFG1 for the TX FIFO and in GENI_RX_PACKING_CFG0 and
+ * GENI_RX_PACKING_CFG1 for the RX FIFO. Refer to the examples below for a
+ * detailed bit-field description.
+ *
+ * Example 1: word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1
+ *
+ *        +-----------+-------+-------+-------+-------+
+ *        |           | vec_0 | vec_1 | vec_2 | vec_3 |
+ *        +-----------+-------+-------+-------+-------+
+ *        | start     | 0x6   | 0xe   | 0x16  | 0x1e  |
+ *        | direction | 1     | 1     | 1     | 1     |
+ *        | length    | 6     | 6     | 6     | 6     |
+ *        | stop      | 0     | 0     | 0     | 1     |
+ *        +-----------+-------+-------+-------+-------+
+ *
+ * Example 2: word_size = 15, packing_mode = 2 x 16, msb_to_lsb = 0
+ *
+ *        +-----------+-------+-------+-------+-------+
+ *        |           | vec_0 | vec_1 | vec_2 | vec_3 |
+ *        +-----------+-------+-------+-------+-------+
+ *        | start     | 0x0   | 0x8   | 0x10  | 0x18  |
+ *        | direction | 0     | 0     | 0     | 0     |
+ *        | length    | 7     | 6     | 7     | 6     |
+ *        | stop      | 0     | 0     | 0     | 1     |
+ *        +-----------+-------+-------+-------+-------+
+ *
+ * Example 3: word_size = 23, packing_mode = 1 x 32, msb_to_lsb = 1
+ *
+ *        +-----------+-------+-------+-------+-------+
+ *        |           | vec_0 | vec_1 | vec_2 | vec_3 |
+ *        +-----------+-------+-------+-------+-------+
+ *        | start     | 0x16  | 0xe   | 0x6   | 0x0   |
+ *        | direction | 1     | 1     | 1     | 1     |
+ *        | length    | 7     | 7     | 6     | 0     |
+ *        | stop      | 0     | 0     | 1     | 0     |
+ *        +-----------+-------+-------+-------+-------+
+ *
+ */
+
+#define NUM_PACKING_VECTORS 4
+#define PACKING_START_SHIFT 5
+#define PACKING_DIR_SHIFT 4
+#define PACKING_LEN_SHIFT 1
+#define PACKING_STOP_BIT BIT(0)
+#define PACKING_VECTOR_SHIFT 10
+/**
+ * geni_se_config_packing() - Packing configuration of the serial engine
+ * @se:		Pointer to the concerned serial engine
+ * @bpw:	Bits of data per transfer word.
+ * @pack_words:	Number of words per fifo element.
+ * @msb_to_lsb:	Transfer from MSB to LSB or vice-versa.
+ * @tx_cfg:	Flag to configure the TX Packing.
+ * @rx_cfg:	Flag to configure the RX Packing.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
+			    bool msb_to_lsb, bool tx_cfg, bool rx_cfg)
+{
+	u32 cfg0, cfg1, cfg[NUM_PACKING_VECTORS] = {0};
+	int len;
+	int temp_bpw = bpw;
+	int idx_start = msb_to_lsb ? bpw - 1 : 0;
+	int idx = idx_start;
+	int idx_delta = msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE;
+	int ceil_bpw = ALIGN(bpw, BITS_PER_BYTE);
+	int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
+	int i;
+
+	if (iter <= 0 || iter > NUM_PACKING_VECTORS)
+		return;
+
+	for (i = 0; i < iter; i++) {
+		len = min_t(int, temp_bpw, BITS_PER_BYTE) - 1;
+		cfg[i] = idx << PACKING_START_SHIFT;
+		cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT;
+		cfg[i] |= len << PACKING_LEN_SHIFT;
+
+		if (temp_bpw <= BITS_PER_BYTE) {
+			idx = ((i + 1) * BITS_PER_BYTE) + idx_start;
+			temp_bpw = bpw;
+		} else {
+			idx = idx + idx_delta;
+			temp_bpw = temp_bpw - BITS_PER_BYTE;
+		}
+	}
+	cfg[iter - 1] |= PACKING_STOP_BIT;
+	cfg0 = cfg[0] | (cfg[1] << PACKING_VECTOR_SHIFT);
+	cfg1 = cfg[2] | (cfg[3] << PACKING_VECTOR_SHIFT);
+
+	if (tx_cfg) {
+		writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0);
+		writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1);
+	}
+	if (rx_cfg) {
+		writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0);
+		writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1);
+	}
+
+	/*
+	 * Number of protocol words in each FIFO entry
+	 * 0 - 4x8, four words in each entry, max word size of 8 bits
+	 * 1 - 2x16, two words in each entry, max word size of 16 bits
+	 * 2 - 1x32, one word in each entry, max word size of 32 bits
+	 * 3 - undefined
+	 */
+	if (pack_words || bpw == 32)
+		writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(geni_se_config_packing);
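The packing-vector arithmetic above can be checked outside the kernel. The stand-alone sketch below mirrors the loop in geni_se_config_packing() for Example 1 of the DOC block (word_size = 7, 4 x 8 packing, msb_to_lsb = 1); it is only an illustration, with the register writes replaced by printf() and the constants copied from this file. It should print the vectors 0xdc, 0x1dc, 0x2dc and 0x3dd, matching the start/direction/length/stop values in the Example 1 table.

#include <stdio.h>

#define BITS_PER_BYTE		8
#define NUM_PACKING_VECTORS	4
#define PACKING_START_SHIFT	5
#define PACKING_DIR_SHIFT	4
#define PACKING_LEN_SHIFT	1
#define PACKING_STOP_BIT	(1U << 0)
#define PACKING_VECTOR_SHIFT	10

int main(void)
{
	int bpw = 7, pack_words = 4, msb_to_lsb = 1;	/* Example 1 above */
	unsigned int cfg[NUM_PACKING_VECTORS] = { 0 };
	int idx_start = msb_to_lsb ? bpw - 1 : 0;
	int idx = idx_start;
	int idx_delta = msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE;
	int ceil_bpw = (bpw + BITS_PER_BYTE - 1) & ~(BITS_PER_BYTE - 1);
	int iter = ceil_bpw * pack_words / BITS_PER_BYTE;
	int temp_bpw = bpw, len, i;

	for (i = 0; i < iter; i++) {
		len = (temp_bpw < BITS_PER_BYTE ? temp_bpw : BITS_PER_BYTE) - 1;
		cfg[i] = idx << PACKING_START_SHIFT;
		cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT;
		cfg[i] |= len << PACKING_LEN_SHIFT;
		if (temp_bpw <= BITS_PER_BYTE) {
			idx = (i + 1) * BITS_PER_BYTE + idx_start;
			temp_bpw = bpw;
		} else {
			idx += idx_delta;
			temp_bpw -= BITS_PER_BYTE;
		}
	}
	cfg[iter - 1] |= PACKING_STOP_BIT;

	for (i = 0; i < iter; i++)	/* expect 0xdc 0x1dc 0x2dc 0x3dd */
		printf("vec_%d = 0x%x\n", i, cfg[i]);
	printf("CFG0 = 0x%x, CFG1 = 0x%x\n",
	       cfg[0] | cfg[1] << PACKING_VECTOR_SHIFT,
	       cfg[2] | cfg[3] << PACKING_VECTOR_SHIFT);
	return 0;
}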
+
+static void geni_se_clks_off(struct geni_se *se)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+
+	clk_disable_unprepare(se->clk);
+	clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+						wrapper->ahb_clks);
+}
+
+/**
+ * geni_se_resources_off() - Turn off resources associated with the serial
+ *                           engine
+ * @se:	Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_off(struct geni_se *se)
+{
+	int ret;
+
+	ret = pinctrl_pm_select_sleep_state(se->dev);
+	if (ret)
+		return ret;
+
+	geni_se_clks_off(se);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_off);
+
+static int geni_se_clks_on(struct geni_se *se)
+{
+	int ret;
+	struct geni_wrapper *wrapper = se->wrapper;
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(wrapper->ahb_clks),
+						wrapper->ahb_clks);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(se->clk);
+	if (ret)
+		clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+							wrapper->ahb_clks);
+	return ret;
+}
+
+/**
+ * geni_se_resources_on() - Turn on resources associated with the serial
+ *                          engine
+ * @se:	Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_on(struct geni_se *se)
+{
+	int ret;
+
+	ret = geni_se_clks_on(se);
+	if (ret)
+		return ret;
+
+	ret = pinctrl_pm_select_default_state(se->dev);
+	if (ret)
+		geni_se_clks_off(se);
+
+	return ret;
+}
+EXPORT_SYMBOL(geni_se_resources_on);
+
+/**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @se:		Pointer to the concerned serial engine.
+ * @tbl:	Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the serial engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return: number of valid performance levels in the table on success,
+ *	   standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
+{
+	unsigned long freq = 0;
+	int i;
+
+	if (se->clk_perf_tbl) {
+		*tbl = se->clk_perf_tbl;
+		return se->num_clk_levels;
+	}
+
+	se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL,
+					sizeof(*se->clk_perf_tbl),
+					GFP_KERNEL);
+	if (!se->clk_perf_tbl)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+		freq = clk_round_rate(se->clk, freq + 1);
+		if (!freq || freq == se->clk_perf_tbl[i - 1])
+			break;
+		se->clk_perf_tbl[i] = freq;
+	}
+	se->num_clk_levels = i;
+	*tbl = se->clk_perf_tbl;
+	return se->num_clk_levels;
+}
+EXPORT_SYMBOL(geni_se_clk_tbl_get);
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @se:		Pointer to the concerned serial engine.
+ * @req_freq:	Requested clock frequency.
+ * @index:	Index of the resultant frequency in the table.
+ * @res_freq:	Resultant frequency which matches or is closer to the
+ *		requested frequency.
+ * @exact:	Flag to indicate exact multiple requirement of the requested
+ *		frequency.
+ *
+ * This function is called by the protocol drivers to find a serial engine
+ * clock frequency that matches, or is an exact multiple of, the requested
+ * frequency, in order to meet the performance requirements. If no match or
+ * exact multiple is found and the exact flag is not set, the closest floor
+ * frequency is selected instead.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
+			   unsigned int *index, unsigned long *res_freq,
+			   bool exact)
+{
+	unsigned long *tbl;
+	int num_clk_levels;
+	int i;
+
+	num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
+	if (num_clk_levels < 0)
+		return num_clk_levels;
+
+	if (num_clk_levels == 0)
+		return -EINVAL;
+
+	*res_freq = 0;
+	for (i = 0; i < num_clk_levels; i++) {
+		if (!(tbl[i] % req_freq)) {
+			*index = i;
+			*res_freq = tbl[i];
+			return 0;
+		}
+
+		if (!(*res_freq) || ((tbl[i] > *res_freq) &&
+				     (tbl[i] < req_freq))) {
+			*index = i;
+			*res_freq = tbl[i];
+		}
+	}
+
+	if (exact)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_clk_freq_match);
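A typical caller first asks for an exact multiple of the desired rate and only then falls back to the closest floor frequency, as in this hypothetical sketch (the function name and fallback policy are illustrative; the returned index is what a protocol driver would program into its DFS clock configuration, and the usual driver includes such as linux/clk.h are assumed):

static int example_set_se_clk(struct geni_se *se, unsigned long want_hz)
{
	unsigned long sclk_hz;
	unsigned int dfs_index;
	int ret;

	/* Prefer a rate the protocol block can divide down exactly */
	ret = geni_se_clk_freq_match(se, want_hz, &dfs_index, &sclk_hz, true);
	if (ret)
		ret = geni_se_clk_freq_match(se, want_hz, &dfs_index,
					     &sclk_hz, false);
	if (ret)
		return ret;

	/* dfs_index selects the matching performance level for the SE clock */
	return clk_set_rate(se->clk, sclk_hz);
}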
+
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_EOT_BUF BIT(0)
+/**
+ * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer
+ * @se:			Pointer to the concerned serial engine.
+ * @buf:		Pointer to the TX buffer.
+ * @len:		Length of the TX buffer.
+ * @iova:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
+			dma_addr_t *iova)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+	u32 val;
+
+	*iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(wrapper->dev, *iova))
+		return -EIO;
+
+	val = GENI_SE_DMA_DONE_EN;
+	val |= GENI_SE_DMA_EOT_EN;
+	val |= GENI_SE_DMA_AHB_ERR_EN;
+	writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
+	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
+	writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
+	writel_relaxed(len, se->base + SE_DMA_TX_LEN);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer
+ * @se:			Pointer to the concerned serial engine.
+ * @buf:		Pointer to the RX buffer.
+ * @len:		Length of the RX buffer.
+ * @iova:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
+			dma_addr_t *iova)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+	u32 val;
+
+	*iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(wrapper->dev, *iova))
+		return -EIO;
+
+	val = GENI_SE_DMA_DONE_EN;
+	val |= GENI_SE_DMA_EOT_EN;
+	val |= GENI_SE_DMA_AHB_ERR_EN;
+	writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);
+	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
+	/* RX does not have EOT buffer type bit. So just reset RX_ATTR */
+	writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
+	writel_relaxed(len, se->base + SE_DMA_RX_LEN);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer
+ * @se:			Pointer to the concerned serial engine.
+ * @iova:		DMA address of the TX buffer.
+ * @len:		Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+
+	if (iova && !dma_mapping_error(wrapper->dev, iova))
+		dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer
+ * @se:			Pointer to the concerned serial engine.
+ * @iova:		DMA address of the RX buffer.
+ * @len:		Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+	struct geni_wrapper *wrapper = se->wrapper;
+
+	if (iova && !dma_mapping_error(wrapper->dev, iova))
+		dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
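Putting the DMA helpers together, a transmit path in a protocol driver roughly follows the shape below. This is only a sketch: the function name is invented, and the step where the driver issues its TX command and waits for the DMA "done" interrupt is reduced to a comment.

static int example_se_dma_tx(struct geni_se *se, void *buf, size_t len)
{
	dma_addr_t tx_dma;
	int ret;

	geni_se_select_mode(se, GENI_SE_DMA);

	/* Map the buffer and program pointer, attributes and length */
	ret = geni_se_tx_dma_prep(se, buf, len, &tx_dma);
	if (ret)
		return ret;

	/*
	 * ... issue the protocol TX command here and wait for the
	 * DMA "done" interrupt before tearing the mapping down ...
	 */

	geni_se_tx_dma_unprep(se, tx_dma, len);
	return 0;
}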
+
+static int geni_se_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct geni_wrapper *wrapper;
+	int ret;
+
+	wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+	if (!wrapper)
+		return -ENOMEM;
+
+	wrapper->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	wrapper->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(wrapper->base))
+		return PTR_ERR(wrapper->base);
+
+	wrapper->ahb_clks[0].id = "m-ahb";
+	wrapper->ahb_clks[1].id = "s-ahb";
+	ret = devm_clk_bulk_get(dev, NUM_AHB_CLKS, wrapper->ahb_clks);
+	if (ret) {
+		dev_err(dev, "Err getting AHB clks %d\n", ret);
+		return ret;
+	}
+
+	dev_set_drvdata(dev, wrapper);
+	dev_dbg(dev, "GENI SE Driver probed\n");
+	return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id geni_se_dt_match[] = {
+	{ .compatible = "qcom,geni-se-qup", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, geni_se_dt_match);
+
+static struct platform_driver geni_se_driver = {
+	.driver = {
+		.name = "geni_se_qup",
+		.of_match_table = geni_se_dt_match,
+	},
+	.probe = geni_se_probe,
+};
+module_platform_driver(geni_se_driver);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 0000000..09c669e
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2014, The Linux foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+#define GSBI_CTRL_REG		0x0000
+#define GSBI_PROTOCOL_SHIFT	4
+#define MAX_GSBI		12
+
+#define TCSR_ADM_CRCI_BASE	0x70
+
+struct crci_config {
+	u32 num_rows;
+	const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+};
+
+static const struct crci_config config_ipq8064 = {
+	.num_rows = ARRAY_SIZE(crci_ipq8064),
+	.array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+	{
+		0x001800, 0x006000, 0x000030, 0x0000c0,
+		0x000300, 0x000400, 0x000000, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+	{
+		0x000000, 0x000000, 0x000000, 0x000000,
+		0x000000, 0x000020, 0x0000c0, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+};
+
+static const struct crci_config config_apq8064 = {
+	.num_rows = ARRAY_SIZE(crci_apq8064),
+	.array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+	{
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000400, 0x000000, 0x000000,
+		0x000000, 0x000000, 0x000000, 0x000000
+	},
+	{
+		0x000000, 0x000000, 0x000000, 0x000000,
+		0x000000, 0x000020, 0x0000c0, 0x000300,
+		0x001800, 0x006000, 0x000000, 0x000000
+	},
+};
+
+static const struct crci_config config_msm8960 = {
+	.num_rows = ARRAY_SIZE(crci_msm8960),
+	.array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+	{	/* ADM 0 - A */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 0 - B */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 1 - A */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+	{	/* ADM 1 - B */
+		0x000003, 0x00000c, 0x000030, 0x0000c0,
+		0x000300, 0x000c00, 0x003000, 0x00c000,
+		0x030000, 0x0c0000, 0x300000, 0xc00000
+	},
+};
+
+static const struct crci_config config_msm8660 = {
+	.num_rows = ARRAY_SIZE(crci_msm8660),
+	.array = crci_msm8660,
+};
+
+struct gsbi_info {
+	struct clk *hclk;
+	u32 mode;
+	u32 crci;
+	struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] = {
+	{ .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+	{ .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+	{ .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+	{ .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+	{ },
+};
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *tcsr_node;
+	const struct of_device_id *match;
+	struct resource *res;
+	void __iomem *base;
+	struct gsbi_info *gsbi;
+	int i;
+	u32 mask, gsbi_num;
+	const struct crci_config *config = NULL;
+
+	gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+	if (!gsbi)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* get the tcsr node and setup the config and regmap */
+	gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+	if (!IS_ERR(gsbi->tcsr)) {
+		tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+		if (tcsr_node) {
+			match = of_match_node(tcsr_dt_match, tcsr_node);
+			if (match)
+				config = match->data;
+			else
+				dev_warn(&pdev->dev, "no matching TCSR\n");
+
+			of_node_put(tcsr_node);
+		}
+	}
+
+	if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+		dev_err(&pdev->dev, "missing cell-index\n");
+		return -EINVAL;
+	}
+
+	if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+		dev_err(&pdev->dev, "invalid cell-index\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
+		dev_err(&pdev->dev, "missing mode configuration\n");
+		return -EINVAL;
+	}
+
+	/* not required, so default to 0 if not present */
+	of_property_read_u32(node, "qcom,crci", &gsbi->crci);
+
+	dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+		 gsbi->mode, gsbi->crci);
+	gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(gsbi->hclk))
+		return PTR_ERR(gsbi->hclk);
+
+	clk_prepare_enable(gsbi->hclk);
+
+	writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
+				base + GSBI_CTRL_REG);
+
+	/*
+	 * modify tcsr to reflect mode and ADM CRCI mux
+	 * Each gsbi contains a pair of bits, one for RX and one for TX
+	 * SPI mode requires both bits cleared, otherwise they are set
+	 */
+	if (config) {
+		for (i = 0; i < config->num_rows; i++) {
+			mask = config->array[i][gsbi_num - 1];
+
+			if (gsbi->mode == GSBI_PROT_SPI)
+				regmap_update_bits(gsbi->tcsr,
+					TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+			else
+				regmap_update_bits(gsbi->tcsr,
+					TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+		}
+	}
+
+	/* make sure the gsbi control write is not reordered */
+	wmb();
+
+	platform_set_drvdata(pdev, gsbi);
+
+	return of_platform_populate(node, NULL, NULL, &pdev->dev);
+}
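To make the ADM CRCI mux comment above concrete, the small stand-alone sketch below picks the mask that gsbi_probe() would apply for a hypothetical GSBI port 5 on MSM8960: the pair of bits is cleared for SPI mode and set for every other protocol. The port number and mode are invented; the table row is copied from crci_msm8960 above.

#include <stdio.h>

#define TCSR_ADM_CRCI_BASE	0x70

/* First ADM row of crci_msm8960, copied from the driver above */
static const unsigned int adm0_crci_msm8960[12] = {
	0x000003, 0x00000c, 0x000030, 0x0000c0,
	0x000300, 0x000400, 0x000000, 0x000000,
	0x000000, 0x000000, 0x000000, 0x000000
};

int main(void)
{
	unsigned int gsbi_num = 5;	/* hypothetical cell-index */
	int spi_mode = 1;		/* pretend qcom,mode selects SPI */
	unsigned int mask = adm0_crci_msm8960[gsbi_num - 1];

	/* gsbi_probe() would regmap_update_bits() this TCSR register */
	printf("TCSR +0x%02x: %s bits 0x%06x\n", TCSR_ADM_CRCI_BASE,
	       spi_mode ? "clear" : "set", mask);
	return 0;
}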
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+	struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(gsbi->hclk);
+
+	return 0;
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+	{ .compatible = "qcom,gsbi-v1.0.0", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+	.driver = {
+		.name		= "gsbi",
+		.of_match_table	= gsbi_dt_match,
+	},
+	.probe = gsbi_probe,
+	.remove	= gsbi_remove,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
new file mode 100644
index 0000000..3aaab71
--- /dev/null
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+	*p_dst++ = type; \
+	*p_dst++ = ((u8)((length) & 0xFF)); \
+	*p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+	*p_type = (u8)*p_src++; \
+	*p_length = (u8)*p_src++; \
+	*p_length |= ((u8)*p_src) << 8; \
+} while (0)
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (u8 *)p_dst + size; \
+	p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (u8 *)p_dst + size; \
+	p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+				encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+	buf_dst = (u8 *)buf_dst + rc; \
+	encoded_bytes += rc; \
+	tlv_len += rc; \
+	temp_si = temp_si + 1; \
+	encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+	buf_src = (u8 *)buf_src + rc; \
+	decoded_bytes += rc; \
+} while (0)
+
+#define TLV_LEN_SIZE sizeof(u16)
+#define TLV_TYPE_SIZE sizeof(u8)
+#define OPTIONAL_TLV_TYPE_START 0x10
+
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+		      const void *in_c_struct, u32 out_buf_len,
+		      int enc_level);
+
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+		      const void *in_buf, u32 in_buf_len, int dec_level);
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ *
+ * Return: struct info of the next element that can be encoded.
+ */
+static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
+					       int level)
+{
+	struct qmi_elem_info *temp_ei = ei_array;
+	u8 tlv_type;
+
+	if (level > 1) {
+		temp_ei = temp_ei + 1;
+	} else {
+		do {
+			tlv_type = temp_ei->tlv_type;
+			temp_ei = temp_ei + 1;
+		} while (tlv_type == temp_ei->tlv_type);
+	}
+
+	return temp_ei;
+}
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * Return: Expected minimum length of the QMI message or 0 on error.
+ */
+static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array,
+				int level)
+{
+	int min_msg_len = 0;
+	struct qmi_elem_info *temp_ei = ei_array;
+
+	if (!ei_array)
+		return min_msg_len;
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		/* Optional elements do not count in minimum length */
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			temp_ei = skip_to_next_elem(temp_ei, level);
+			continue;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			min_msg_len += (temp_ei->elem_size == sizeof(u8) ?
+					sizeof(u8) : sizeof(u16));
+			temp_ei++;
+			continue;
+		} else if (temp_ei->data_type == QMI_STRUCT) {
+			min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+							    (level + 1));
+			temp_ei++;
+		} else if (temp_ei->data_type == QMI_STRING) {
+			if (level > 1)
+				min_msg_len += temp_ei->elem_len <= U8_MAX ?
+					sizeof(u8) : sizeof(u16);
+			min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+			temp_ei++;
+		} else {
+			min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+			temp_ei++;
+		}
+
+		/*
+		 * Type & Length info. not prepended for elements in the
+		 * nested structure.
+		 */
+		if (level == 1)
+			min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+	}
+
+	return min_msg_len;
+}
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of a primary data type such as u8 through u64. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
+				 u32 elem_len, u32 elem_size)
+{
+	u32 i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
+				  void *buf_dst, const void *buf_src,
+				  u32 elem_len, u32 out_buf_len,
+				  int enc_level)
+{
+	int i, rc, encoded_bytes = 0;
+	struct qmi_elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len; i++) {
+		rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
+				out_buf_len - encoded_bytes, enc_level);
+		if (rc < 0) {
+			pr_err("%s: STRUCT Encode failure\n", __func__);
+			return rc;
+		}
+		buf_dst = buf_dst + rc;
+		buf_src = buf_src + temp_ei->elem_size;
+		encoded_bytes += rc;
+	}
+
+	return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
+				  void *buf_dst, const void *buf_src,
+				  u32 out_buf_len, int enc_level)
+{
+	int rc;
+	int encoded_bytes = 0;
+	struct qmi_elem_info *temp_ei = ei_array;
+	u32 string_len = 0;
+	u32 string_len_sz = 0;
+
+	string_len = strlen(buf_src);
+	string_len_sz = temp_ei->elem_len <= U8_MAX ?
+			sizeof(u8) : sizeof(u16);
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String to be encoded is longer - %d > %d\n",
+		       __func__, string_len, temp_ei->elem_len);
+		return -EINVAL;
+	}
+
+	if (enc_level == 1) {
+		if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+		    out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+			       __func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+	} else {
+		if (string_len + string_len_sz > out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+			       __func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+		rc = qmi_encode_basic_elem(buf_dst, &string_len,
+					   1, string_len_sz);
+		encoded_bytes += rc;
+	}
+
+	rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+				   string_len, temp_ei->elem_size);
+	encoded_bytes += rc;
+
+	return encoded_bytes;
+}
+
+/**
+ * qmi_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ *             within the main structure, being encoded.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+		      const void *in_c_struct, u32 out_buf_len,
+		      int enc_level)
+{
+	struct qmi_elem_info *temp_ei = ei_array;
+	u8 opt_flag_value = 0;
+	u32 data_len_value = 0, data_len_sz;
+	u8 *buf_dst = (u8 *)out_buf;
+	u8 *tlv_pointer;
+	u32 tlv_len;
+	u8 tlv_type;
+	u32 encoded_bytes = 0;
+	const void *buf_src;
+	int encode_tlv = 0;
+	int rc;
+
+	if (!ei_array)
+		return 0;
+
+	tlv_pointer = buf_dst;
+	tlv_len = 0;
+	if (enc_level == 1)
+		buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		buf_src = in_c_struct + temp_ei->offset;
+		tlv_type = temp_ei->tlv_type;
+
+		if (temp_ei->array_type == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->array_type == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value <= 0 ||
+			    temp_ei->elem_len < data_len_value) {
+			pr_err("%s: Invalid data length\n", __func__);
+			return -EINVAL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_OPT_FLAG:
+			rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+						   1, sizeof(u8));
+			if (opt_flag_value)
+				temp_ei = temp_ei + 1;
+			else
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			break;
+
+		case QMI_DATA_LEN:
+			memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+			data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+					sizeof(u8) : sizeof(u16);
+			/* Check to avoid out of range buffer access */
+			if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+			    TLV_TYPE_SIZE) > out_buf_len) {
+				pr_err("%s: Too Small Buffer @DATA_LEN\n",
+				       __func__);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+						   1, data_len_sz);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+						encoded_bytes, tlv_len,
+						encode_tlv, rc);
+			if (!data_len_value)
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			else
+				encode_tlv = 0;
+			break;
+
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			/* Check to avoid out of range buffer access */
+			if (((data_len_value * temp_ei->elem_size) +
+			    encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+			    out_buf_len) {
+				pr_err("%s: Too Small Buffer @data_type:%d\n",
+				       __func__, temp_ei->data_type);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, buf_src,
+						   data_len_value,
+						   temp_ei->elem_size);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+						encoded_bytes, tlv_len,
+						encode_tlv, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+						    data_len_value,
+						    out_buf_len - encoded_bytes,
+						    enc_level + 1);
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+						encoded_bytes, tlv_len,
+						encode_tlv, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+						    out_buf_len - encoded_bytes,
+						    enc_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+						encoded_bytes, tlv_len,
+						encode_tlv, rc);
+			break;
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+		}
+
+		if (encode_tlv && enc_level == 1) {
+			QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+			encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			tlv_pointer = buf_dst;
+			tlv_len = 0;
+			buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+			encode_tlv = 0;
+		}
+	}
+
+	return encoded_bytes;
+}
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of a primary data type such as u8 through u64. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements, in bytes.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
+				 u32 elem_len, u32 elem_size)
+{
+	u32 i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
+				  void *buf_dst, const void *buf_src,
+				  u32 elem_len, u32 tlv_len,
+				  int dec_level)
+{
+	int i, rc, decoded_bytes = 0;
+	struct qmi_elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+		rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
+				tlv_len - decoded_bytes, dec_level);
+		if (rc < 0)
+			return rc;
+		buf_src = buf_src + rc;
+		buf_dst = buf_dst + temp_ei->elem_size;
+		decoded_bytes += rc;
+	}
+
+	if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+	    (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+		pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+		       __func__, dec_level, decoded_bytes, tlv_len,
+		       i, elem_len);
+		return -EFAULT;
+	}
+
+	return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns number of bytes
+ * decoded from the input buffer.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
+				  void *buf_dst, const void *buf_src,
+				  u32 tlv_len, int dec_level)
+{
+	int rc;
+	int decoded_bytes = 0;
+	u32 string_len = 0;
+	u32 string_len_sz = 0;
+	struct qmi_elem_info *temp_ei = ei_array;
+
+	if (dec_level == 1) {
+		string_len = tlv_len;
+	} else {
+		string_len_sz = temp_ei->elem_len <= U8_MAX ?
+				sizeof(u8) : sizeof(u16);
+		rc = qmi_decode_basic_elem(&string_len, buf_src,
+					   1, string_len_sz);
+		decoded_bytes += rc;
+	}
+
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String len %d > Max Len %d\n",
+		       __func__, string_len, temp_ei->elem_len);
+		return -ETOOSMALL;
+	} else if (string_len > tlv_len) {
+		pr_err("%s: String len %d > Input Buffer Len %d\n",
+		       __func__, string_len, tlv_len);
+		return -EFAULT;
+	}
+
+	rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+				   string_len, temp_ei->elem_size);
+	*((char *)buf_dst + string_len) = '\0';
+	decoded_bytes += rc;
+
+	return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ *
+ * Return: Pointer to struct info if found, NULL otherwise.
+ */
+static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
+				     u32 type)
+{
+	struct qmi_elem_info *temp_ei = ei_array;
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		if (temp_ei->tlv_type == (u8)type)
+			return temp_ei;
+		temp_ei = temp_ei + 1;
+	}
+
+	return NULL;
+}
+
+/**
+ * qmi_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ *             within the main structure, being decoded
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+		      const void *in_buf, u32 in_buf_len,
+		      int dec_level)
+{
+	struct qmi_elem_info *temp_ei = ei_array;
+	u8 opt_flag_value = 1;
+	u32 data_len_value = 0, data_len_sz = 0;
+	u8 *buf_dst = out_c_struct;
+	const u8 *tlv_pointer;
+	u32 tlv_len = 0;
+	u32 tlv_type;
+	u32 decoded_bytes = 0;
+	const void *buf_src = in_buf;
+	int rc;
+
+	while (decoded_bytes < in_buf_len) {
+		if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+			return decoded_bytes;
+
+		if (dec_level == 1) {
+			tlv_pointer = buf_src;
+			QMI_ENCDEC_DECODE_TLV(&tlv_type,
+					      &tlv_len, tlv_pointer);
+			buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			temp_ei = find_ei(ei_array, tlv_type);
+			if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) {
+				pr_err("%s: Inval element info\n", __func__);
+				return -EINVAL;
+			} else if (!temp_ei) {
+				UPDATE_DECODE_VARIABLES(buf_src,
+							decoded_bytes, tlv_len);
+				continue;
+			}
+		} else {
+			/*
+			 * No length information for elements in nested
+			 * structures. So use remaining decodable buffer space.
+			 */
+			tlv_len = in_buf_len - decoded_bytes;
+		}
+
+		buf_dst = out_c_struct + temp_ei->offset;
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			memcpy(buf_dst, &opt_flag_value, sizeof(u8));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+					sizeof(u8) : sizeof(u16);
+			rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+						   1, data_len_sz);
+			memcpy(buf_dst, &data_len_value, sizeof(u32));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+			tlv_len -= data_len_sz;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+		}
+
+		if (temp_ei->array_type == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->array_type == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value > temp_ei->elem_len) {
+			pr_err("%s: Data len %d > max spec %d\n",
+			       __func__, data_len_value, temp_ei->elem_len);
+			return -ETOOSMALL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			rc = qmi_decode_basic_elem(buf_dst, buf_src,
+						   data_len_value,
+						   temp_ei->elem_size);
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+						    data_len_value, tlv_len,
+						    dec_level + 1);
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+						    tlv_len, dec_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+		}
+		temp_ei = temp_ei + 1;
+	}
+
+	return decoded_bytes;
+}
+
+/**
+ * qmi_encode_message() - Encode C structure as QMI encoded message
+ * @type:	Type of QMI message
+ * @msg_id:	Message ID of the message
+ * @len:	Passed as max length of the message, updated to actual size
+ * @txn_id:	Transaction ID
+ * @ei:		QMI message descriptor
+ * @c_struct:	Reference to structure to encode
+ *
+ * Return: Buffer with encoded message, or negative ERR_PTR() on error
+ */
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+			 unsigned int txn_id, struct qmi_elem_info *ei,
+			 const void *c_struct)
+{
+	struct qmi_header *hdr;
+	ssize_t msglen = 0;
+	void *msg;
+	int ret;
+
+	/* Check the possibility of a zero length QMI message */
+	if (!c_struct) {
+		ret = qmi_calc_min_msg_len(ei, 1);
+		if (ret) {
+			pr_err("%s: Calc. len %d != 0, but NULL c_struct\n",
+			       __func__, ret);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL);
+	if (!msg)
+		return ERR_PTR(-ENOMEM);
+
+	/* Encode message, if we have a message */
+	if (c_struct) {
+		msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1);
+		if (msglen < 0) {
+			kfree(msg);
+			return ERR_PTR(msglen);
+		}
+	}
+
+	hdr = msg;
+	hdr->type = type;
+	hdr->txn_id = txn_id;
+	hdr->msg_id = msg_id;
+	hdr->msg_len = msglen;
+
+	*len = sizeof(*hdr) + msglen;
+
+	return msg;
+}
+EXPORT_SYMBOL(qmi_encode_message);
+
+/**
+ * qmi_decode_message() - Decode QMI encoded message to C structure
+ * @buf:	Buffer with encoded message
+ * @len:	Amount of data in @buf
+ * @ei:		QMI message descriptor
+ * @c_struct:	Reference to structure to decode into
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+int qmi_decode_message(const void *buf, size_t len,
+		       struct qmi_elem_info *ei, void *c_struct)
+{
+	if (!ei)
+		return -EINVAL;
+
+	if (!c_struct || !buf || !len)
+		return -EINVAL;
+
+	return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
+			  len - sizeof(struct qmi_header), 1);
+}
+EXPORT_SYMBOL(qmi_decode_message);
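As an illustration of how the descriptor tables drive these two entry points, here is a hypothetical round trip for a message with a single mandatory u32 TLV. The struct, ei array, message ID and buffer size are all made up for the example; QMI_REQUEST and the qmi_elem_info and qmi_header definitions are assumed to come from include/linux/soc/qcom/qmi.h. On a little-endian host the encoded buffer is the 7-byte QMI header followed by the TLV bytes 01 04 00 34 12 00 00.

#include <linux/soc/qcom/qmi.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical request: one mandatory u32 TLV of type 0x01 */
struct example_ping_req {
	u32 cookie;
};

static struct qmi_elem_info example_ping_req_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct example_ping_req, cookie),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
	},
};

static int example_roundtrip(void)
{
	struct example_ping_req req = { .cookie = 0x1234 }, out = {};
	size_t len = 32;	/* maximum encoded size, updated on return */
	void *msg;
	int ret;

	msg = qmi_encode_message(QMI_REQUEST, 0x20, &len, 1,
				 example_ping_req_ei, &req);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* Decode straight back into another instance of the C struct */
	ret = qmi_decode_message(msg, len, example_ping_req_ei, &out);
	kfree(msg);

	return ret < 0 ? ret : 0;
}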
+
+/* Common header in all QMI responses */
+struct qmi_elem_info qmi_response_type_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct qmi_response_type_v01, result),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct qmi_response_type_v01, error),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	},
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
+
+MODULE_DESCRIPTION("QMI encoder/decoder helper");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 0000000..938ca41
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/soc/qcom/qmi.h>
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+				      struct sockaddr_qrtr *sq);
+
+/**
+ * qmi_recv_new_server() - handler of NEW_SERVER control message
+ * @qmi:	qmi handle
+ * @service:	service id of the new server
+ * @instance:	instance id of the new server
+ * @node:	node of the new server
+ * @port:	port of the new server
+ *
+ * Calls the new_server callback to inform the client about a newly registered
+ * server matching the currently registered service lookup.
+ */
+static void qmi_recv_new_server(struct qmi_handle *qmi,
+				unsigned int service, unsigned int instance,
+				unsigned int node, unsigned int port)
+{
+	struct qmi_ops *ops = &qmi->ops;
+	struct qmi_service *svc;
+	int ret;
+
+	if (!ops->new_server)
+		return;
+
+	/* Ignore EOF marker */
+	if (!node && !port)
+		return;
+
+	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+	if (!svc)
+		return;
+
+	svc->service = service;
+	svc->version = instance & 0xff;
+	svc->instance = instance >> 8;
+	svc->node = node;
+	svc->port = port;
+
+	ret = ops->new_server(qmi, svc);
+	if (ret < 0)
+		kfree(svc);
+	else
+		list_add(&svc->list_node, &qmi->lookup_results);
+}
+
+/**
+ * qmi_recv_del_server() - handler of DEL_SERVER control message
+ * @qmi:	qmi handle
+ * @node:	node of the dying server, a value of -1 matches all nodes
+ * @port:	port of the dying server, a value of -1 matches all ports
+ *
+ * Calls the del_server callback for each previously seen server, allowing the
+ * client to react to the disappearing server.
+ */
+static void qmi_recv_del_server(struct qmi_handle *qmi,
+				unsigned int node, unsigned int port)
+{
+	struct qmi_ops *ops = &qmi->ops;
+	struct qmi_service *svc;
+	struct qmi_service *tmp;
+
+	list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) {
+		if (node != -1 && svc->node != node)
+			continue;
+		if (port != -1 && svc->port != port)
+			continue;
+
+		if (ops->del_server)
+			ops->del_server(qmi, svc);
+
+		list_del(&svc->list_node);
+		kfree(svc);
+	}
+}
+
+/**
+ * qmi_recv_bye() - handler of BYE control message
+ * @qmi:	qmi handle
+ * @node:	id of the dying node
+ *
+ * Signals the client that all previously registered services on this node are
+ * now gone and then calls the bye callback to allow the client to further
+ * clean up resources associated with this remote.
+ */
+static void qmi_recv_bye(struct qmi_handle *qmi,
+			 unsigned int node)
+{
+	struct qmi_ops *ops = &qmi->ops;
+
+	qmi_recv_del_server(qmi, node, -1);
+
+	if (ops->bye)
+		ops->bye(qmi, node);
+}
+
+/**
+ * qmi_recv_del_client() - handler of DEL_CLIENT control message
+ * @qmi:	qmi handle
+ * @node:	node of the dying client
+ * @port:	port of the dying client
+ *
+ * Signals the client about a dying client, by calling the del_client callback.
+ */
+static void qmi_recv_del_client(struct qmi_handle *qmi,
+				unsigned int node, unsigned int port)
+{
+	struct qmi_ops *ops = &qmi->ops;
+
+	if (ops->del_client)
+		ops->del_client(qmi, node, port);
+}
+
+static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi,
+			      const void *buf, size_t len)
+{
+	const struct qrtr_ctrl_pkt *pkt = buf;
+
+	if (len < sizeof(struct qrtr_ctrl_pkt)) {
+		pr_debug("ignoring short control packet\n");
+		return;
+	}
+
+	switch (le32_to_cpu(pkt->cmd)) {
+	case QRTR_TYPE_BYE:
+		qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
+		break;
+	case QRTR_TYPE_NEW_SERVER:
+		qmi_recv_new_server(qmi,
+				    le32_to_cpu(pkt->server.service),
+				    le32_to_cpu(pkt->server.instance),
+				    le32_to_cpu(pkt->server.node),
+				    le32_to_cpu(pkt->server.port));
+		break;
+	case QRTR_TYPE_DEL_SERVER:
+		qmi_recv_del_server(qmi,
+				    le32_to_cpu(pkt->server.node),
+				    le32_to_cpu(pkt->server.port));
+		break;
+	case QRTR_TYPE_DEL_CLIENT:
+		qmi_recv_del_client(qmi,
+				    le32_to_cpu(pkt->client.node),
+				    le32_to_cpu(pkt->client.port));
+		break;
+	}
+}
+
+static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct qrtr_ctrl_pkt pkt;
+	struct sockaddr_qrtr sq;
+	struct msghdr msg = { };
+	struct kvec iv = { &pkt, sizeof(pkt) };
+	int ret;
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
+	pkt.server.service = cpu_to_le32(svc->service);
+	pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+
+	sq.sq_family = qmi->sq.sq_family;
+	sq.sq_node = qmi->sq.sq_node;
+	sq.sq_port = QRTR_PORT_CTRL;
+
+	msg.msg_name = &sq;
+	msg.msg_namelen = sizeof(sq);
+
+	mutex_lock(&qmi->sock_lock);
+	if (qmi->sock) {
+		ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+		if (ret < 0)
+			pr_err("failed to send lookup registration: %d\n", ret);
+	}
+	mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_lookup() - register a new lookup with the name service
+ * @qmi:	qmi handle
+ * @service:	service id of the request
+ * @instance:	instance id of the request
+ * @version:	version number of the request
+ *
+ * Registering a lookup query with the name server will cause the name server
+ * to send NEW_SERVER and DEL_SERVER control messages to this socket as
+ * matching services are registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+		   unsigned int version, unsigned int instance)
+{
+	struct qmi_service *svc;
+
+	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->service = service;
+	svc->version = version;
+	svc->instance = instance;
+
+	list_add(&svc->list_node, &qmi->lookups);
+
+	qmi_send_new_lookup(qmi, svc);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_add_lookup);
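A minimal client-side sketch of the lookup flow, assuming qmi_handle_init() as defined later in this file and an invented service/version pair; the new_server callback simply logs where the matching server can be reached, and no message handlers are registered.

static int example_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
	pr_info("example service %u found at node %u port %u\n",
		svc->service, svc->node, svc->port);
	return 0;
}

static const struct qmi_ops example_lookup_ops = {
	.new_server = example_new_server,
};

static int example_start_lookup(struct qmi_handle *qmi)
{
	int ret;

	/* 4096 bytes is an arbitrary upper bound on encoded message size */
	ret = qmi_handle_init(qmi, 4096, &example_lookup_ops, NULL);
	if (ret < 0)
		return ret;

	/* Hypothetical service id 15, version 1, instance 0 */
	return qmi_add_lookup(qmi, 15, 1, 0);
}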
+
+static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct qrtr_ctrl_pkt pkt;
+	struct sockaddr_qrtr sq;
+	struct msghdr msg = { };
+	struct kvec iv = { &pkt, sizeof(pkt) };
+	int ret;
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+	pkt.server.service = cpu_to_le32(svc->service);
+	pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+	pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
+	pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
+
+	sq.sq_family = qmi->sq.sq_family;
+	sq.sq_node = qmi->sq.sq_node;
+	sq.sq_port = QRTR_PORT_CTRL;
+
+	msg.msg_name = &sq;
+	msg.msg_namelen = sizeof(sq);
+
+	mutex_lock(&qmi->sock_lock);
+	if (qmi->sock) {
+		ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+		if (ret < 0)
+			pr_err("send service registration failed: %d\n", ret);
+	}
+	mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_server() - register a service with the name service
+ * @qmi:	qmi handle
+ * @service:	type of the service
+ * @instance:	instance of the service
+ * @version:	version of the service
+ *
+ * Register a new service with the name service. This allows clients to find
+ * and start sending messages to the service associated with @qmi.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+		   unsigned int version, unsigned int instance)
+{
+	struct qmi_service *svc;
+
+	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->service = service;
+	svc->version = version;
+	svc->instance = instance;
+
+	list_add(&svc->list_node, &qmi->services);
+
+	qmi_send_new_server(qmi, svc);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_add_server);
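+
+/*
+ * Example (illustrative sketch; MY_SERVICE_ID and MY_SERVICE_VERS are
+ * hypothetical placeholders). A server normally registers once, after its
+ * handle has been initialized with the message handlers it implements:
+ *
+ *	ret = qmi_add_server(qmi, MY_SERVICE_ID, MY_SERVICE_VERS, 0);
+ *	if (ret)
+ *		return ret;
+ */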
+
+/**
+ * qmi_txn_init() - allocate transaction id within the given QMI handle
+ * @qmi:	QMI handle
+ * @txn:	transaction context
+ * @ei:		description of how to decode a matching response (optional)
+ * @c_struct:	pointer to the object to decode the response into (optional)
+ *
+ * This allocates a transaction id within the QMI handle. If @ei and @c_struct
+ * are specified, any responses to this transaction will be decoded as described
+ * by @ei into @c_struct.
+ *
+ * A client calling qmi_txn_init() must call either qmi_txn_wait() or
+ * qmi_txn_cancel() to free up the allocated resources.
+ *
+ * Return: Transaction id on success, negative errno on failure.
+ */
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+		 struct qmi_elem_info *ei, void *c_struct)
+{
+	int ret;
+
+	memset(txn, 0, sizeof(*txn));
+
+	mutex_init(&txn->lock);
+	init_completion(&txn->completion);
+	txn->qmi = qmi;
+	txn->ei = ei;
+	txn->dest = c_struct;
+
+	mutex_lock(&qmi->txn_lock);
+	ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL);
+	if (ret < 0)
+		pr_err("failed to allocate transaction id\n");
+
+	txn->id = ret;
+	mutex_unlock(&qmi->txn_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_txn_init);
+
+/**
+ * qmi_txn_wait() - wait for a response on a transaction
+ * @txn:	transaction handle
+ * @timeout:	timeout, in jiffies
+ *
+ * If the transaction was set up with @ei and @c_struct, the return value will
+ * be the return value of qmi_decode_message(); otherwise it is up to the
+ * specified message handler to fill out the result.
+ *
+ * Return: the transaction response on success, negative errno on failure.
+ */
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
+{
+	struct qmi_handle *qmi = txn->qmi;
+	int ret;
+
+	ret = wait_for_completion_interruptible_timeout(&txn->completion,
+							timeout);
+
+	mutex_lock(&qmi->txn_lock);
+	mutex_lock(&txn->lock);
+	idr_remove(&qmi->txns, txn->id);
+	mutex_unlock(&txn->lock);
+	mutex_unlock(&qmi->txn_lock);
+
+	if (ret < 0)
+		return ret;
+	else if (ret == 0)
+		return -ETIMEDOUT;
+	else
+		return txn->result;
+}
+EXPORT_SYMBOL(qmi_txn_wait);
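+
+/*
+ * Example of the request/response pattern built on qmi_txn_init(),
+ * qmi_send_request() and qmi_txn_wait() (illustrative sketch; the message id,
+ * lengths, ei tables, request/response structs and svc_sq - the server's
+ * sockaddr, e.g. saved from a new_server notification - are hypothetical
+ * placeholders):
+ *
+ *	struct my_resp resp = {};
+ *	struct my_req req = {};
+ *	struct qmi_txn txn;
+ *	int ret;
+ *
+ *	ret = qmi_txn_init(qmi, &txn, my_resp_ei, &resp);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	ret = qmi_send_request(qmi, &svc_sq, &txn, MY_REQ_MSG_ID,
+ *			       MY_REQ_MAX_LEN, my_req_ei, &req);
+ *	if (ret < 0) {
+ *		qmi_txn_cancel(&txn);
+ *		return ret;
+ *	}
+ *
+ *	ret = qmi_txn_wait(&txn, 5 * HZ);
+ */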
+
+/**
+ * qmi_txn_cancel() - cancel an ongoing transaction
+ * @txn:	transaction handle
+ */
+void qmi_txn_cancel(struct qmi_txn *txn)
+{
+	struct qmi_handle *qmi = txn->qmi;
+
+	mutex_lock(&qmi->txn_lock);
+	mutex_lock(&txn->lock);
+	idr_remove(&qmi->txns, txn->id);
+	mutex_unlock(&txn->lock);
+	mutex_unlock(&qmi->txn_lock);
+}
+EXPORT_SYMBOL(qmi_txn_cancel);
+
+/**
+ * qmi_invoke_handler() - find and invoke a handler for a message
+ * @qmi:	qmi handle
+ * @sq:		sockaddr of the sender
+ * @txn:	transaction object for the message
+ * @buf:	buffer containing the message
+ * @len:	length of @buf
+ *
+ * Find and invoke the handler for the incoming message.
+ */
+static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			       struct qmi_txn *txn, const void *buf, size_t len)
+{
+	const struct qmi_msg_handler *handler;
+	const struct qmi_header *hdr = buf;
+	void *dest;
+	int ret;
+
+	if (!qmi->handlers)
+		return;
+
+	for (handler = qmi->handlers; handler->fn; handler++) {
+		if (handler->type == hdr->type &&
+		    handler->msg_id == hdr->msg_id)
+			break;
+	}
+
+	if (!handler->fn)
+		return;
+
+	dest = kzalloc(handler->decoded_size, GFP_KERNEL);
+	if (!dest)
+		return;
+
+	ret = qmi_decode_message(buf, len, handler->ei, dest);
+	if (ret < 0)
+		pr_err("failed to decode incoming message\n");
+	else
+		handler->fn(qmi, sq, txn, dest);
+
+	kfree(dest);
+}
+
+/**
+ * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle
+ * @qmi:	the QMI context
+ *
+ * As a result of registering a name service with the QRTR all open sockets are
+ * flagged with ENETRESET and this function will be called. The typical case is
+ * the initial boot, where this signals that the local node id has been
+ * configured and as such any bound sockets need to be rebound. So close the
+ * socket, inform the client and re-initialize the socket.
+ *
+ * For clients it's generally sufficient to react to the del_server callbacks,
+ * but server code is expected to treat the net_reset callback as a "bye" from
+ * all nodes.
+ *
+ * Finally the QMI handle will send out registration requests for any lookups
+ * and services.
+ */
+static void qmi_handle_net_reset(struct qmi_handle *qmi)
+{
+	struct sockaddr_qrtr sq;
+	struct qmi_service *svc;
+	struct socket *sock;
+
+	sock = qmi_sock_create(qmi, &sq);
+	if (IS_ERR(sock))
+		return;
+
+	mutex_lock(&qmi->sock_lock);
+	sock_release(qmi->sock);
+	qmi->sock = NULL;
+	mutex_unlock(&qmi->sock_lock);
+
+	qmi_recv_del_server(qmi, -1, -1);
+
+	if (qmi->ops.net_reset)
+		qmi->ops.net_reset(qmi);
+
+	mutex_lock(&qmi->sock_lock);
+	qmi->sock = sock;
+	qmi->sq = sq;
+	mutex_unlock(&qmi->sock_lock);
+
+	list_for_each_entry(svc, &qmi->lookups, list_node)
+		qmi_send_new_lookup(qmi, svc);
+
+	list_for_each_entry(svc, &qmi->services, list_node)
+		qmi_send_new_server(qmi, svc);
+}
+
+static void qmi_handle_message(struct qmi_handle *qmi,
+			       struct sockaddr_qrtr *sq,
+			       const void *buf, size_t len)
+{
+	const struct qmi_header *hdr;
+	struct qmi_txn tmp_txn;
+	struct qmi_txn *txn = NULL;
+	int ret;
+
+	if (len < sizeof(*hdr)) {
+		pr_err("ignoring short QMI packet\n");
+		return;
+	}
+
+	hdr = buf;
+
+	/* If this is a response, find the matching transaction handle */
+	if (hdr->type == QMI_RESPONSE) {
+		mutex_lock(&qmi->txn_lock);
+		txn = idr_find(&qmi->txns, hdr->txn_id);
+
+		/* Ignore unexpected responses */
+		if (!txn) {
+			mutex_unlock(&qmi->txn_lock);
+			return;
+		}
+
+		mutex_lock(&txn->lock);
+		mutex_unlock(&qmi->txn_lock);
+
+		if (txn->dest && txn->ei) {
+			ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+			if (ret < 0)
+				pr_err("failed to decode incoming message\n");
+
+			txn->result = ret;
+			complete(&txn->completion);
+		} else  {
+			qmi_invoke_handler(qmi, sq, txn, buf, len);
+		}
+
+		mutex_unlock(&txn->lock);
+	} else {
+		/* Create a txn based on the txn_id of the incoming message */
+		memset(&tmp_txn, 0, sizeof(tmp_txn));
+		tmp_txn.id = hdr->txn_id;
+
+		qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
+	}
+}
+
+static void qmi_data_ready_work(struct work_struct *work)
+{
+	struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
+	struct qmi_ops *ops = &qmi->ops;
+	struct sockaddr_qrtr sq;
+	struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+	struct kvec iv;
+	ssize_t msglen;
+
+	for (;;) {
+		iv.iov_base = qmi->recv_buf;
+		iv.iov_len = qmi->recv_buf_size;
+
+		mutex_lock(&qmi->sock_lock);
+		if (qmi->sock)
+			msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1,
+						iv.iov_len, MSG_DONTWAIT);
+		else
+			msglen = -EPIPE;
+		mutex_unlock(&qmi->sock_lock);
+		if (msglen == -EAGAIN)
+			break;
+
+		if (msglen == -ENETRESET) {
+			qmi_handle_net_reset(qmi);
+
+			/* The old qmi->sock is gone, our work is done */
+			break;
+		}
+
+		if (msglen < 0) {
+			pr_err("qmi recvmsg failed: %zd\n", msglen);
+			break;
+		}
+
+		if (sq.sq_node == qmi->sq.sq_node &&
+		    sq.sq_port == QRTR_PORT_CTRL) {
+			qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen);
+		} else if (ops->msg_handler) {
+			ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
+		} else {
+			qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
+		}
+	}
+}
+
+static void qmi_data_ready(struct sock *sk)
+{
+	struct qmi_handle *qmi = sk->sk_user_data;
+
+	/*
+	 * This will be NULL if we receive data while being in
+	 * qmi_handle_release()
+	 */
+	if (!qmi)
+		return;
+
+	queue_work(qmi->wq, &qmi->work);
+}
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+				      struct sockaddr_qrtr *sq)
+{
+	struct socket *sock;
+	int ret;
+
+	ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+			       PF_QIPCRTR, &sock);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	ret = kernel_getsockname(sock, (struct sockaddr *)sq);
+	if (ret < 0) {
+		sock_release(sock);
+		return ERR_PTR(ret);
+	}
+
+	sock->sk->sk_user_data = qmi;
+	sock->sk->sk_data_ready = qmi_data_ready;
+	sock->sk->sk_error_report = qmi_data_ready;
+
+	return sock;
+}
+
+/**
+ * qmi_handle_init() - initialize a QMI client handle
+ * @qmi:	QMI handle to initialize
+ * @recv_buf_size: maximum size of incoming message
+ * @ops:	reference to callbacks for QRTR notifications
+ * @handlers:	NULL-terminated list of QMI message handlers
+ *
+ * This initializes the QMI client handle to allow sending and receiving QMI
+ * messages. As messages are received the appropriate handler will be invoked.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
+		    const struct qmi_ops *ops,
+		    const struct qmi_msg_handler *handlers)
+{
+	int ret;
+
+	mutex_init(&qmi->txn_lock);
+	mutex_init(&qmi->sock_lock);
+
+	idr_init(&qmi->txns);
+
+	INIT_LIST_HEAD(&qmi->lookups);
+	INIT_LIST_HEAD(&qmi->lookup_results);
+	INIT_LIST_HEAD(&qmi->services);
+
+	INIT_WORK(&qmi->work, qmi_data_ready_work);
+
+	qmi->handlers = handlers;
+	if (ops)
+		qmi->ops = *ops;
+
+	/* Make room for the header */
+	recv_buf_size += sizeof(struct qmi_header);
+	/* Must also be sufficient to hold a control packet */
+	if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt))
+		recv_buf_size = sizeof(struct qrtr_ctrl_pkt);
+
+	qmi->recv_buf_size = recv_buf_size;
+	qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+	if (!qmi->recv_buf)
+		return -ENOMEM;
+
+	qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1);
+	if (!qmi->wq) {
+		ret = -ENOMEM;
+		goto err_free_recv_buf;
+	}
+
+	qmi->sock = qmi_sock_create(qmi, &qmi->sq);
+	if (IS_ERR(qmi->sock)) {
+		pr_err("failed to create QMI socket\n");
+		ret = PTR_ERR(qmi->sock);
+		goto err_destroy_wq;
+	}
+
+	return 0;
+
+err_destroy_wq:
+	destroy_workqueue(qmi->wq);
+err_free_recv_buf:
+	kfree(qmi->recv_buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_handle_init);
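+
+/*
+ * Example handler table and initialization (illustrative sketch; the msg_id,
+ * ei table, decoded struct, callback, ops and sizes are hypothetical
+ * placeholders, not defined in this file):
+ *
+ *	static const struct qmi_msg_handler my_handlers[] = {
+ *		{
+ *			.type = QMI_INDICATION,
+ *			.msg_id = MY_IND_MSG_ID,
+ *			.ei = my_ind_ei,
+ *			.decoded_size = sizeof(struct my_ind),
+ *			.fn = my_ind_cb,
+ *		},
+ *		{}
+ *	};
+ *
+ *	ret = qmi_handle_init(&my_handle, MY_MAX_MSG_LEN, &my_ops, my_handlers);
+ */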
+
+/**
+ * qmi_handle_release() - release the QMI client handle
+ * @qmi:	QMI client handle
+ *
+ * This closes the underlying socket and stops any handling of QMI messages.
+ */
+void qmi_handle_release(struct qmi_handle *qmi)
+{
+	struct socket *sock = qmi->sock;
+	struct qmi_service *svc, *tmp;
+
+	sock->sk->sk_user_data = NULL;
+	cancel_work_sync(&qmi->work);
+
+	qmi_recv_del_server(qmi, -1, -1);
+
+	mutex_lock(&qmi->sock_lock);
+	sock_release(sock);
+	qmi->sock = NULL;
+	mutex_unlock(&qmi->sock_lock);
+
+	destroy_workqueue(qmi->wq);
+
+	idr_destroy(&qmi->txns);
+
+	kfree(qmi->recv_buf);
+
+	/* Free registered lookup requests */
+	list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) {
+		list_del(&svc->list_node);
+		kfree(svc);
+	}
+
+	/* Free registered service information */
+	list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) {
+		list_del(&svc->list_node);
+		kfree(svc);
+	}
+}
+EXPORT_SYMBOL(qmi_handle_release);
+
+/**
+ * qmi_send_message() - send a QMI message
+ * @qmi:	QMI client handle
+ * @sq:		destination sockaddr
+ * @txn:	transaction object to use for the message
+ * @type:	type of message to send
+ * @msg_id:	message id
+ * @len:	max length of the QMI message
+ * @ei:		QMI message description
+ * @c_struct:	object to be encoded
+ *
+ * This function encodes @c_struct using @ei into a message of type @type,
+ * with @msg_id and @txn into a buffer of maximum size @len, and sends this to
+ * @sq.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static ssize_t qmi_send_message(struct qmi_handle *qmi,
+				struct sockaddr_qrtr *sq, struct qmi_txn *txn,
+				int type, int msg_id, size_t len,
+				struct qmi_elem_info *ei, const void *c_struct)
+{
+	struct msghdr msghdr = {};
+	struct kvec iv;
+	void *msg;
+	int ret;
+
+	msg = qmi_encode_message(type,
+				 msg_id, &len,
+				 txn->id, ei,
+				 c_struct);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	iv.iov_base = msg;
+	iv.iov_len = len;
+
+	if (sq) {
+		msghdr.msg_name = sq;
+		msghdr.msg_namelen = sizeof(*sq);
+	}
+
+	mutex_lock(&qmi->sock_lock);
+	if (qmi->sock) {
+		ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
+		if (ret < 0)
+			pr_err("failed to send QMI message\n");
+	} else {
+		ret = -EPIPE;
+	}
+	mutex_unlock(&qmi->sock_lock);
+
+	kfree(msg);
+
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * qmi_send_request() - send a request QMI message
+ * @qmi:	QMI client handle
+ * @sq:		destination sockaddr
+ * @txn:	transaction object to use for the message
+ * @msg_id:	message id
+ * @len:	max length of the QMI message
+ * @ei:		QMI message description
+ * @c_struct:	object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			 struct qmi_txn *txn, int msg_id, size_t len,
+			 struct qmi_elem_info *ei, const void *c_struct)
+{
+	return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
+				c_struct);
+}
+EXPORT_SYMBOL(qmi_send_request);
+
+/**
+ * qmi_send_response() - send a response QMI message
+ * @qmi:	QMI client handle
+ * @sq:		destination sockaddr
+ * @txn:	transaction object to use for the message
+ * @msg_id:	message id
+ * @len:	max length of the QMI message
+ * @ei:		QMI message description
+ * @c_struct:	object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			  struct qmi_txn *txn, int msg_id, size_t len,
+			  struct qmi_elem_info *ei, const void *c_struct)
+{
+	return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
+				c_struct);
+}
+EXPORT_SYMBOL(qmi_send_response);
+
+/**
+ * qmi_send_indication() - send an indication QMI message
+ * @qmi:	QMI client handle
+ * @sq:		destination sockaddr
+ * @msg_id:	message id
+ * @len:	max length of the QMI message
+ * @ei:		QMI message description
+ * @c_struct:	object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			    int msg_id, size_t len, struct qmi_elem_info *ei,
+			    const void *c_struct)
+{
+	struct qmi_txn txn;
+	ssize_t rval;
+	int ret;
+
+	ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+	if (ret < 0)
+		return ret;
+
+	rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
+				c_struct);
+
+	/* We don't care about future messages on this txn */
+	qmi_txn_cancel(&txn);
+
+	return rval;
+}
+EXPORT_SYMBOL(qmi_send_indication);
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 0000000..97bb598
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX	(MINORMASK + 1)
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+	struct device dev;
+	struct cdev cdev;
+
+	void *base;
+	phys_addr_t addr;
+	phys_addr_t size;
+
+	unsigned int client_id;
+
+	unsigned int perms;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf);
+
+static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+							struct qcom_rmtfs_mem,
+							dev);
+
+	if (attr == &dev_attr_phys_addr)
+		return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+	if (attr == &dev_attr_size)
+		return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+	if (attr == &dev_attr_client_id)
+		return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+	return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+	&dev_attr_phys_addr.attr,
+	&dev_attr_size.attr,
+	&dev_attr_client_id.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+							struct qcom_rmtfs_mem,
+							cdev);
+
+	get_device(&rmtfs_mem->dev);
+	filp->private_data = rmtfs_mem;
+
+	return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+			      char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+	if (*f_pos >= rmtfs_mem->size)
+		return 0;
+
+	if (*f_pos + count >= rmtfs_mem->size)
+		count = rmtfs_mem->size - *f_pos;
+
+	if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+		return -EFAULT;
+
+	*f_pos += count;
+	return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+			       const char __user *buf, size_t count,
+			       loff_t *f_pos)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+	if (*f_pos >= rmtfs_mem->size)
+		return 0;
+
+	if (*f_pos + count >= rmtfs_mem->size)
+		count = rmtfs_mem->size - *f_pos;
+
+	if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+		return -EFAULT;
+
+	*f_pos += count;
+	return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+	put_device(&rmtfs_mem->dev);
+
+	return 0;
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+	.owner = THIS_MODULE,
+	.open = qcom_rmtfs_mem_open,
+	.read = qcom_rmtfs_mem_read,
+	.write = qcom_rmtfs_mem_write,
+	.release = qcom_rmtfs_mem_release,
+	.llseek = default_llseek,
+};
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+							struct qcom_rmtfs_mem,
+							dev);
+
+	kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct qcom_scm_vmperm perms[2];
+	struct reserved_mem *rmem;
+	struct qcom_rmtfs_mem *rmtfs_mem;
+	u32 client_id;
+	u32 vmid;
+	int ret;
+
+	rmem = of_reserved_mem_lookup(node);
+	if (!rmem) {
+		dev_err(&pdev->dev, "failed to acquire memory region\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+		return ret;
+
+	}
+
+	rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+	if (!rmtfs_mem)
+		return -ENOMEM;
+
+	rmtfs_mem->addr = rmem->base;
+	rmtfs_mem->client_id = client_id;
+	rmtfs_mem->size = rmem->size;
+
+	device_initialize(&rmtfs_mem->dev);
+	rmtfs_mem->dev.parent = &pdev->dev;
+	rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+	rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+	rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+					rmtfs_mem->size, MEMREMAP_WC);
+	if (IS_ERR(rmtfs_mem->base)) {
+		dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+		ret = PTR_ERR(rmtfs_mem->base);
+		goto put_device;
+	}
+
+	cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+	rmtfs_mem->cdev.owner = THIS_MODULE;
+
+	dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+	rmtfs_mem->dev.id = client_id;
+	rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+	ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+		goto put_device;
+	}
+
+	ret = of_property_read_u32(node, "qcom,vmid", &vmid);
+	if (ret < 0 && ret != -EINVAL) {
+		dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
+		goto remove_cdev;
+	} else if (!ret) {
+		if (!qcom_scm_is_available()) {
+			ret = -EPROBE_DEFER;
+			goto remove_cdev;
+		}
+
+		perms[0].vmid = QCOM_SCM_VMID_HLOS;
+		perms[0].perm = QCOM_SCM_PERM_RW;
+		perms[1].vmid = vmid;
+		perms[1].perm = QCOM_SCM_PERM_RW;
+
+		rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
+		ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+					  &rmtfs_mem->perms, perms, 2);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "assign memory failed\n");
+			goto remove_cdev;
+		}
+	}
+
+	dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+	return 0;
+
+remove_cdev:
+	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+put_device:
+	put_device(&rmtfs_mem->dev);
+
+	return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+	struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+	struct qcom_scm_vmperm perm;
+
+	if (rmtfs_mem->perms) {
+		perm.vmid = QCOM_SCM_VMID_HLOS;
+		perm.perm = QCOM_SCM_PERM_RW;
+
+		qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+				    &rmtfs_mem->perms, &perm, 1);
+	}
+
+	cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+	put_device(&rmtfs_mem->dev);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+	{ .compatible = "qcom,rmtfs-mem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+	.probe = qcom_rmtfs_mem_probe,
+	.remove = qcom_rmtfs_mem_remove,
+	.driver  = {
+		.name  = "qcom_rmtfs_mem",
+		.of_match_table = qcom_rmtfs_mem_of_match,
+	},
+};
+
+static int qcom_rmtfs_mem_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+				  QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+	if (ret < 0) {
+		pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+		return ret;
+	}
+
+	ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+	if (ret < 0) {
+		pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+		unregister_chrdev_region(qcom_rmtfs_mem_major,
+					 QCOM_RMTFS_MEM_DEV_MAX);
+	}
+
+	return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void qcom_rmtfs_mem_exit(void)
+{
+	platform_driver_unregister(&qcom_rmtfs_mem_driver);
+	unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+}
+module_exit(qcom_rmtfs_mem_exit);
+
+MODULE_AUTHOR("Linaro Ltd");
+MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 0000000..a7bbbb6
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR			4
+#define MAX_CMDS_PER_TCS		16
+#define MAX_TCS_PER_TYPE		3
+#define MAX_TCS_NR			(MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv:       the controller
+ * @type:      type of the TCS in this group - active, sleep, wake
+ * @mask:      mask of the TCSes relative to all the TCSes in the RSC
+ * @offset:    start of the TCS group relative to the TCSes in the RSC
+ * @num_tcs:   number of TCSes in this type
+ * @ncpt:      number of commands in each TCS
+ * @lock:      lock for synchronizing writes to this TCS
+ * @req:       requests that are sent from the TCS
+ * @cmd_cache: flattened cache of cmds in sleep/wake TCS
+ * @slots:     indicates which of @cmd_cache are occupied
+ */
+struct tcs_group {
+	struct rsc_drv *drv;
+	int type;
+	u32 mask;
+	u32 offset;
+	int num_tcs;
+	int ncpt;
+	spinlock_t lock;
+	const struct tcs_request *req[MAX_TCS_PER_TYPE];
+	u32 *cmd_cache;
+	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @err: err return from the controller
+ * @needs_free: set if the request object was dynamically allocated and must be freed
+ */
+struct rpmh_request {
+	struct tcs_request msg;
+	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+	struct completion *completion;
+	const struct device *dev;
+	int err;
+	bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+	struct list_head cache;
+	spinlock_t cache_lock;
+	bool dirty;
+	struct list_head batch_cache;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name:       controller identifier
+ * @tcs_base:   start address of the TCS registers in this controller
+ * @id:         instance id in the controller (Direct Resource Voter)
+ * @num_tcs:    number of TCSes in this DRV
+ * @tcs:        TCS groups
+ * @tcs_in_use: s/w state of the TCS
+ * @lock:       synchronize state of the controller
+ * @client:     handle to the DRV's client.
+ */
+struct rsc_drv {
+	const char *name;
+	void __iomem *tcs_base;
+	int id;
+	int num_tcs;
+	struct tcs_group tcs[TCS_TYPE_NR];
+	DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+	spinlock_t lock;
+	struct rpmh_ctrlr client;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+			     const struct tcs_request *msg);
+int rpmh_rsc_invalidate(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg, int r);
+
+#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 0000000..ee75da6
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+#define RSC_DRV_TCS_OFFSET		672
+#define RSC_DRV_CMD_OFFSET		20
+
+/* DRV Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG		0x0C
+#define DRV_NUM_TCS_MASK		0x3F
+#define DRV_NUM_TCS_SHIFT		6
+#define DRV_NCPT_MASK			0x1F
+#define DRV_NCPT_SHIFT			27
+
+/* Register offsets */
+#define RSC_DRV_IRQ_ENABLE		0x00
+#define RSC_DRV_IRQ_STATUS		0x04
+#define RSC_DRV_IRQ_CLEAR		0x08
+#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10
+#define RSC_DRV_CONTROL			0x14
+#define RSC_DRV_STATUS			0x18
+#define RSC_DRV_CMD_ENABLE		0x1C
+#define RSC_DRV_CMD_MSGID		0x30
+#define RSC_DRV_CMD_ADDR		0x34
+#define RSC_DRV_CMD_DATA		0x38
+#define RSC_DRV_CMD_STATUS		0x3C
+#define RSC_DRV_CMD_RESP_DATA		0x40
+
+#define TCS_AMC_MODE_ENABLE		BIT(16)
+#define TCS_AMC_MODE_TRIGGER		BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN			8
+#define CMD_MSGID_RESP_REQ		BIT(8)
+#define CMD_MSGID_WRITE			BIT(16)
+#define CMD_STATUS_ISSUED		BIT(8)
+#define CMD_STATUS_COMPL		BIT(16)
+
+static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+	return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+			     RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
+			  u32 data)
+{
+	writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+		       RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+{
+	writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+}
+
+static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
+			       u32 data)
+{
+	writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+	for (;;) {
+		if (data == readl(drv->tcs_base + reg +
+				  RSC_DRV_TCS_OFFSET * tcs_id))
+			break;
+		udelay(1);
+	}
+}
+
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+	return !test_bit(tcs_id, drv->tcs_in_use) &&
+	       read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+}
+
+static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+{
+	return &drv->tcs[type];
+}
+
+static int tcs_invalidate(struct rsc_drv *drv, int type)
+{
+	int m;
+	struct tcs_group *tcs;
+
+	tcs = get_tcs_of_type(drv, type);
+
+	spin_lock(&tcs->lock);
+	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
+		spin_unlock(&tcs->lock);
+		return 0;
+	}
+
+	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+		if (!tcs_is_free(drv, m)) {
+			spin_unlock(&tcs->lock);
+			return -EAGAIN;
+		}
+		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
+	}
+	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+	spin_unlock(&tcs->lock);
+
+	return 0;
+}
+
+/**
+ * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ *
+ * @drv: the RSC controller
+ */
+int rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+	int ret;
+
+	ret = tcs_invalidate(drv, SLEEP_TCS);
+	if (!ret)
+		ret = tcs_invalidate(drv, WAKE_TCS);
+
+	return ret;
+}
+
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+					 const struct tcs_request *msg)
+{
+	int type, ret;
+	struct tcs_group *tcs;
+
+	switch (msg->state) {
+	case RPMH_ACTIVE_ONLY_STATE:
+		type = ACTIVE_TCS;
+		break;
+	case RPMH_WAKE_ONLY_STATE:
+		type = WAKE_TCS;
+		break;
+	case RPMH_SLEEP_STATE:
+		type = SLEEP_TCS;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * If we are making an active request on an RSC that does not have a
+	 * dedicated TCS for active state use, then re-purpose a wake TCS to
+	 * send active votes.
+	 * NOTE: The driver must be aware that this RSC does not have a
+	 * dedicated AMC, and therefore would invalidate the sleep and wake
+	 * TCSes before making an active state request.
+	 */
+	tcs = get_tcs_of_type(drv, type);
+	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
+		tcs = get_tcs_of_type(drv, WAKE_TCS);
+		if (tcs->num_tcs) {
+			ret = rpmh_rsc_invalidate(drv);
+			if (ret)
+				return ERR_PTR(ret);
+		}
+	}
+
+	return tcs;
+}
+
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+						  int tcs_id)
+{
+	struct tcs_group *tcs;
+	int i;
+
+	for (i = 0; i < TCS_TYPE_NR; i++) {
+		tcs = &drv->tcs[i];
+		if (tcs->mask & BIT(tcs_id))
+			return tcs->req[tcs_id - tcs->offset];
+	}
+
+	return NULL;
+}
+
+/**
+ * tcs_tx_done: TX Done interrupt handler
+ *
+ * @irq: the IRQ number (unused)
+ * @p:   the &struct rsc_drv that raised the interrupt
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+	struct rsc_drv *drv = p;
+	int i, j, err = 0;
+	unsigned long irq_status;
+	const struct tcs_request *req;
+	struct tcs_cmd *cmd;
+
+	irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+
+	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
+		req = get_req_from_tcs(drv, i);
+		if (!req) {
+			WARN_ON(1);
+			goto skip;
+		}
+
+		err = 0;
+		for (j = 0; j < req->num_cmds; j++) {
+			u32 sts;
+
+			cmd = &req->cmds[j];
+			sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+			if (!(sts & CMD_STATUS_ISSUED) ||
+			   ((req->wait_for_compl || cmd->wait) &&
+			   !(sts & CMD_STATUS_COMPL))) {
+				pr_err("Incomplete request: %s: addr=%#x data=%#x\n",
+				       drv->name, cmd->addr, cmd->data);
+				err = -EIO;
+			}
+		}
+
+		trace_rpmh_tx_done(drv, i, req, err);
+skip:
+		/* Reclaim the TCS */
+		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+		write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+		spin_lock(&drv->lock);
+		clear_bit(i, drv->tcs_in_use);
+		spin_unlock(&drv->lock);
+		if (req)
+			rpmh_tx_done(req, err);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+			       const struct tcs_request *msg)
+{
+	u32 msgid, cmd_msgid;
+	u32 cmd_enable = 0;
+	u32 cmd_complete;
+	struct tcs_cmd *cmd;
+	int i, j;
+
+	cmd_msgid = CMD_MSGID_LEN;
+	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+	cmd_msgid |= CMD_MSGID_WRITE;
+
+	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+
+	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+		cmd = &msg->cmds[i];
+		cmd_enable |= BIT(j);
+		cmd_complete |= cmd->wait << j;
+		msgid = cmd_msgid;
+		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
+		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
+		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
+		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+	}
+
+	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
+	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+}
+
+static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
+{
+	u32 enable;
+
+	/*
+	 * HW req: Clear the DRV_CONTROL and enable TCS again
+	 * While clearing ensure that the AMC mode trigger is cleared
+	 * and then the mode enable is cleared.
+	 */
+	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+	enable &= ~TCS_AMC_MODE_TRIGGER;
+	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+	enable &= ~TCS_AMC_MODE_ENABLE;
+	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+	/* Enable the AMC mode on the TCS and then trigger the TCS */
+	enable = TCS_AMC_MODE_ENABLE;
+	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+	enable |= TCS_AMC_MODE_TRIGGER;
+	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+}
+
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+				  const struct tcs_request *msg)
+{
+	unsigned long curr_enabled;
+	u32 addr;
+	int i, j, k;
+	int tcs_id = tcs->offset;
+
+	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
+		if (tcs_is_free(drv, tcs_id))
+			continue;
+
+		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+
+		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+			addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+			for (k = 0; k < msg->num_cmds; k++) {
+				if (addr == msg->cmds[k].addr)
+					return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int find_free_tcs(struct tcs_group *tcs)
+{
+	int i;
+
+	for (i = 0; i < tcs->num_tcs; i++) {
+		if (tcs_is_free(tcs->drv, tcs->offset + i))
+			return tcs->offset + i;
+	}
+
+	return -EBUSY;
+}
+
+static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+	struct tcs_group *tcs;
+	int tcs_id;
+	unsigned long flags;
+	int ret;
+
+	tcs = get_tcs_for_msg(drv, msg);
+	if (IS_ERR(tcs))
+		return PTR_ERR(tcs);
+
+	spin_lock_irqsave(&tcs->lock, flags);
+	spin_lock(&drv->lock);
+	/*
+	 * The h/w does not like it if we send a request to the same address
+	 * when one is already in-flight or being processed.
+	 */
+	ret = check_for_req_inflight(drv, tcs, msg);
+	if (ret) {
+		spin_unlock(&drv->lock);
+		goto done_write;
+	}
+
+	tcs_id = find_free_tcs(tcs);
+	if (tcs_id < 0) {
+		ret = tcs_id;
+		spin_unlock(&drv->lock);
+		goto done_write;
+	}
+
+	tcs->req[tcs_id - tcs->offset] = msg;
+	set_bit(tcs_id, drv->tcs_in_use);
+	spin_unlock(&drv->lock);
+
+	__tcs_buffer_write(drv, tcs_id, 0, msg);
+	__tcs_trigger(drv, tcs_id);
+
+done_write:
+	spin_unlock_irqrestore(&tcs->lock, flags);
+	return ret;
+}
+
+/**
+ * rpmh_rsc_send_data: Validate the incoming message and write to the
+ * appropriate TCS block.
+ *
+ * @drv: the controller
+ * @msg: the data to be sent
+ *
+ * Return: 0 on success, -EINVAL on error.
+ * Note: This call blocks until the message has been written to a TCS.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+	int ret;
+
+	if (!msg || !msg->cmds || !msg->num_cmds ||
+	    msg->num_cmds > MAX_RPMH_PAYLOAD) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	do {
+		ret = tcs_write(drv, msg);
+		if (ret == -EBUSY) {
+			pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+					    msg->cmds[0].addr);
+			udelay(10);
+		}
+	} while (ret == -EBUSY);
+
+	return ret;
+}
+
+static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
+		      int len)
+{
+	int i, j;
+
+	/* Check for already cached commands */
+	for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
+		if (tcs->cmd_cache[i] != cmd[0].addr)
+			continue;
+		if (i + len >= tcs->num_tcs * tcs->ncpt)
+			goto seq_err;
+		for (j = 0; j < len; j++) {
+			if (tcs->cmd_cache[i + j] != cmd[j].addr)
+				goto seq_err;
+		}
+		return i;
+	}
+
+	return -ENODATA;
+
+seq_err:
+	WARN(1, "Message does not match previous sequence.\n");
+	return -EINVAL;
+}
+
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+		      int *tcs_id, int *cmd_id)
+{
+	int slot, offset;
+	int i = 0;
+
+	/* Find if we already have the msg in our TCS */
+	slot = find_match(tcs, msg->cmds, msg->num_cmds);
+	if (slot >= 0)
+		goto copy_data;
+
+	/* Do over, until we can fit the full payload in a TCS */
+	do {
+		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+						  i, msg->num_cmds, 0);
+		if (slot == tcs->num_tcs * tcs->ncpt)
+			return -ENOMEM;
+		i += tcs->ncpt;
+	} while (slot + msg->num_cmds - 1 >= i);
+
+copy_data:
+	bitmap_set(tcs->slots, slot, msg->num_cmds);
+	/* Copy the addresses of the resources over to the slots */
+	for (i = 0; i < msg->num_cmds; i++)
+		tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
+
+	offset = slot / tcs->ncpt;
+	*tcs_id = offset + tcs->offset;
+	*cmd_id = slot % tcs->ncpt;
+
+	return 0;
+}
+
+static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+	struct tcs_group *tcs;
+	int tcs_id = 0, cmd_id = 0;
+	unsigned long flags;
+	int ret;
+
+	tcs = get_tcs_for_msg(drv, msg);
+	if (IS_ERR(tcs))
+		return PTR_ERR(tcs);
+
+	spin_lock_irqsave(&tcs->lock, flags);
+	/* find the TCS id and the command in the TCS to write to */
+	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+	if (!ret)
+		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+	spin_unlock_irqrestore(&tcs->lock, flags);
+
+	return ret;
+}
+
+/**
+ * rpmh_rsc_write_ctrl_data: Write request to the controller
+ *
+ * @drv: the controller
+ * @msg: the data to be written to the controller
+ *
+ * There is no response returned for writing the request to the controller.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+	if (!msg || !msg->cmds || !msg->num_cmds ||
+	    msg->num_cmds > MAX_RPMH_PAYLOAD) {
+		pr_err("Payload error\n");
+		return -EINVAL;
+	}
+
+	/* Data sent to this API will not be sent immediately */
+	if (msg->state == RPMH_ACTIVE_ONLY_STATE)
+		return -EINVAL;
+
+	return tcs_ctrl_write(drv, msg);
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev,
+				 struct rsc_drv *drv)
+{
+	struct tcs_type_config {
+		u32 type;
+		u32 n;
+	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+	struct device_node *dn = pdev->dev.of_node;
+	u32 config, max_tcs, ncpt, offset;
+	int i, ret, n, st = 0;
+	struct tcs_group *tcs;
+	struct resource *res;
+	void __iomem *base;
+	char drv_id[10] = {0};
+
+	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+	if (ret)
+		return ret;
+	drv->tcs_base = base + offset;
+
+	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
+
+	max_tcs = config;
+	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+	ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+	if (n != 2 * TCS_TYPE_NR)
+		return -EINVAL;
+
+	for (i = 0; i < TCS_TYPE_NR; i++) {
+		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+						 i * 2, &tcs_cfg[i].type);
+		if (ret)
+			return ret;
+		if (tcs_cfg[i].type >= TCS_TYPE_NR)
+			return -EINVAL;
+
+		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+						 i * 2 + 1, &tcs_cfg[i].n);
+		if (ret)
+			return ret;
+		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+			return -EINVAL;
+	}
+
+	for (i = 0; i < TCS_TYPE_NR; i++) {
+		tcs = &drv->tcs[tcs_cfg[i].type];
+		if (tcs->drv)
+			return -EINVAL;
+		tcs->drv = drv;
+		tcs->type = tcs_cfg[i].type;
+		tcs->num_tcs = tcs_cfg[i].n;
+		tcs->ncpt = ncpt;
+		spin_lock_init(&tcs->lock);
+
+		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+			continue;
+
+		if (st + tcs->num_tcs > max_tcs ||
+		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+			return -EINVAL;
+
+		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+		tcs->offset = st;
+		st += tcs->num_tcs;
+
+		/*
+		 * Allocate memory to cache sleep and wake requests to
+		 * avoid reading TCS register memory.
+		 */
+		if (tcs->type == ACTIVE_TCS)
+			continue;
+
+		tcs->cmd_cache = devm_kcalloc(&pdev->dev,
+					      tcs->num_tcs * ncpt, sizeof(u32),
+					      GFP_KERNEL);
+		if (!tcs->cmd_cache)
+			return -ENOMEM;
+	}
+
+	drv->num_tcs = st;
+
+	return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	struct rsc_drv *drv;
+	int ret, irq;
+
+	/*
+	 * Even though RPMh doesn't directly use cmd-db, all of its children
+	 * do. To avoid adding this check to our children we'll do it now.
+	 */
+	ret = cmd_db_ready();
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Command DB not available (%d)\n",
+									ret);
+		return ret;
+	}
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+	if (ret)
+		return ret;
+
+	drv->name = of_get_property(dn, "label", NULL);
+	if (!drv->name)
+		drv->name = dev_name(&pdev->dev);
+
+	ret = rpmh_probe_tcs_config(pdev, drv);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&drv->lock);
+	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+	irq = platform_get_irq(pdev, drv->id);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+			       drv->name, drv);
+	if (ret)
+		return ret;
+
+	/* Enable the active TCS to send requests immediately */
+	write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+
+	spin_lock_init(&drv->client.cache_lock);
+	INIT_LIST_HEAD(&drv->client.cache);
+	INIT_LIST_HEAD(&drv->client.batch_cache);
+
+	dev_set_drvdata(&pdev->dev, drv);
+
+	return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+	{ .compatible = "qcom,rpmh-rsc", },
+	{ }
+};
+
+static struct platform_driver rpmh_driver = {
+	.probe = rpmh_rsc_probe,
+	.driver = {
+		  .name = "rpmh",
+		  .of_match_table = rpmh_drv_match,
+	},
+};
+
+static int __init rpmh_driver_init(void)
+{
+	return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 0000000..c7beb68
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
+	struct rpmh_request name = {			\
+		.msg = {				\
+			.state = s,			\
+			.cmds = name.cmd,		\
+			.num_cmds = 0,			\
+			.wait_for_compl = true,		\
+		},					\
+		.cmd = { { 0 } },			\
+		.completion = q,			\
+		.dev = dev,				\
+		.needs_free = false,				\
+	}
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+	u32 addr;
+	u32 sleep_val;
+	u32 wake_val;
+	struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+
+struct batch_cache_req {
+	struct list_head list;
+	int count;
+	struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+	struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+	return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg, int r)
+{
+	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+						    msg);
+	struct completion *compl = rpm_msg->completion;
+
+	rpm_msg->err = r;
+
+	if (r)
+		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
+			rpm_msg->msg.cmds[0].addr, r);
+
+	if (!compl)
+		goto exit;
+
+	/* Signal the blocking thread we are done */
+	complete(compl);
+
+exit:
+	if (rpm_msg->needs_free)
+		kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+	struct cache_req *p, *req = NULL;
+
+	list_for_each_entry(p, &ctrlr->cache, list) {
+		if (p->addr == addr) {
+			req = p;
+			break;
+		}
+	}
+
+	return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+					   enum rpmh_state state,
+					   struct tcs_cmd *cmd)
+{
+	struct cache_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	req = __find_req(ctrlr, cmd->addr);
+	if (req)
+		goto existing;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req) {
+		req = ERR_PTR(-ENOMEM);
+		goto unlock;
+	}
+
+	req->addr = cmd->addr;
+	req->sleep_val = req->wake_val = UINT_MAX;
+	INIT_LIST_HEAD(&req->list);
+	list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+	switch (state) {
+	case RPMH_ACTIVE_ONLY_STATE:
+		if (req->sleep_val != UINT_MAX)
+			req->wake_val = cmd->data;
+		break;
+	case RPMH_WAKE_ONLY_STATE:
+		req->wake_val = cmd->data;
+		break;
+	case RPMH_SLEEP_STATE:
+		req->sleep_val = cmd->data;
+		break;
+	default:
+		break;
+	}
+
+	ctrlr->dirty = true;
+unlock:
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+	return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+			struct rpmh_request *rpm_msg)
+{
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	int ret = -EINVAL;
+	struct cache_req *req;
+	int i;
+
+	rpm_msg->msg.state = state;
+
+	/* Cache the request in our store and link the payload */
+	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+	}
+
+	rpm_msg->msg.state = state;
+
+	if (state == RPMH_ACTIVE_ONLY_STATE) {
+		WARN_ON(irqs_disabled());
+		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+	} else {
+		ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+				&rpm_msg->msg);
+		/* Clean up our call by spoofing tx_done */
+		rpmh_tx_done(&rpm_msg->msg, ret);
+	}
+
+	return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+		const struct tcs_cmd *cmd, u32 n)
+{
+	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+		return -EINVAL;
+
+	memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+	req->msg.state = state;
+	req->msg.cmds = req->cmd;
+	req->msg.num_cmds = n;
+
+	return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands; the order of the commands is maintained
+ * and they are sent to the controller as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+		     const struct tcs_cmd *cmd, u32 n)
+{
+	struct rpmh_request *rpm_msg;
+	int ret;
+
+	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+	if (!rpm_msg)
+		return -ENOMEM;
+	rpm_msg->needs_free = true;
+
+	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+	if (ret) {
+		kfree(rpm_msg);
+		return ret;
+	}
+
+	return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+	       const struct tcs_cmd *cmd, u32 n)
+{
+	DECLARE_COMPLETION_ONSTACK(compl);
+	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+	int ret;
+
+	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+		return -EINVAL;
+
+	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
+	rpm_msg.msg.num_cmds = n;
+
+	ret = __rpmh_write(dev, state, &rpm_msg);
+	if (ret)
+		return ret;
+
+	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+	WARN_ON(!ret);
+	return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
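+
+/*
+ * Example (illustrative sketch; MY_RES_ADDR and MY_VOTE are hypothetical
+ * placeholders for a resource address, e.g. looked up from Command DB, and
+ * the value to vote for):
+ *
+ *	struct tcs_cmd cmd = {
+ *		.addr = MY_RES_ADDR,
+ *		.data = MY_VOTE,
+ *	};
+ *
+ *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ */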
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_add_tail(&req->list, &ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req;
+	const struct rpmh_request *rpm_msg;
+	unsigned long flags;
+	int ret = 0;
+	int i;
+
+	/* Send Sleep/Wake requests to the controller, expect no response */
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry(req, &ctrlr->batch_cache, list) {
+		for (i = 0; i < req->count; i++) {
+			rpm_msg = req->rpm_msgs + i;
+			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+						       &rpm_msg->msg);
+			if (ret)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+	return ret;
+}
+
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+		kfree(req);
+	INIT_LIST_HEAD(&ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, 0-terminated.
+ *
+ * Write a request to the RSC controller. If the request state is
+ * ACTIVE_ONLY, the requests are sent to the controller immediately and the
+ * function waits until all the commands are complete. If the request was for
+ * SLEEP or WAKE_ONLY, the requests are cached and later sent to the
+ * controller by rpmh_flush() as fire-and-forget writes; no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+		     const struct tcs_cmd *cmd, u32 *n)
+{
+	struct batch_cache_req *req;
+	struct rpmh_request *rpm_msgs;
+	DECLARE_COMPLETION_ONSTACK(compl);
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	unsigned long time_left;
+	int count = 0;
+	int ret, i, j;
+
+	if (!cmd || !n)
+		return -EINVAL;
+
+	while (n[count] > 0)
+		count++;
+	if (!count)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+		      GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+	req->count = count;
+	rpm_msgs = req->rpm_msgs;
+
+	for (i = 0; i < count; i++) {
+		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+		cmd += n[i];
+	}
+
+	if (state != RPMH_ACTIVE_ONLY_STATE) {
+		cache_batch(ctrlr, req);
+		return 0;
+	}
+
+	for (i = 0; i < count; i++) {
+		rpm_msgs[i].completion = &compl;
+		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+		if (ret) {
+			pr_err("Error(%d) sending RPMH message addr=%#x\n",
+			       ret, rpm_msgs[i].msg.cmds[0].addr);
+			for (j = i; j < count; j++)
+				rpmh_tx_done(&rpm_msgs[j].msg, ret);
+			break;
+		}
+	}
+
+	time_left = RPMH_TIMEOUT_MS;
+	for (i = 0; i < count; i++) {
+		time_left = wait_for_completion_timeout(&compl, time_left);
+		if (!time_left) {
+			/*
+			 * Better hope they never finish because they'll signal
+			 * the completion on our stack and that's bad once
+			 * we've returned from the function.
+			 */
+			WARN_ON(1);
+			ret = -ETIMEDOUT;
+			goto exit;
+		}
+	}
+
+exit:
+	kfree(req);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
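+
+/*
+ * Example (illustrative sketch; the addresses and vote values are hypothetical
+ * placeholders). Three commands are sent as two batches, two commands in the
+ * first and one in the second, described by a zero-terminated count array:
+ *
+ *	struct tcs_cmd cmds[] = {
+ *		{ .addr = MY_RES_A, .data = VOTE_A },
+ *		{ .addr = MY_RES_B, .data = VOTE_B },
+ *		{ .addr = MY_RES_C, .data = VOTE_C },
+ *	};
+ *	u32 counts[] = { 2, 1, 0 };
+ *
+ *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, counts);
+ */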
+
+static int is_req_valid(struct cache_req *req)
+{
+	return (req->sleep_val != UINT_MAX &&
+		req->wake_val != UINT_MAX &&
+		req->sleep_val != req->wake_val);
+}
+
+static int send_single(const struct device *dev, enum rpmh_state state,
+		       u32 addr, u32 data)
+{
+	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+
+	/* Wake sets are always complete and sleep sets are not */
+	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+	rpm_msg.cmd[0].addr = addr;
+	rpm_msg.cmd[0].data = data;
+	rpm_msg.msg.num_cmds = 1;
+
+	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered sleep and wake sets to TCSes
+ *
+ * @dev: The device making the request
+ *
+ * Return: -EBUSY if the controller is busy, probably waiting on a response
+ * to an RPMH request sent earlier.
+ *
+ * This function is always called from the sleep code from the last CPU
+ * that is powering down the entire system. Since no other RPMH API would be
+ * executing at this time, it is safe to run lockless.
+ */
+int rpmh_flush(const struct device *dev)
+{
+	struct cache_req *p;
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	int ret;
+
+	if (!ctrlr->dirty) {
+		pr_debug("Skipping flush, TCS has latest data.\n");
+		return 0;
+	}
+
+	/* First flush the cached batch requests */
+	ret = flush_batch(ctrlr);
+	if (ret)
+		return ret;
+
+	/*
+	 * Nobody else should be calling this function other than system PM,
+	 * hence we can run without locks.
+	 */
+	list_for_each_entry(p, &ctrlr->cache, list) {
+		if (!is_req_valid(p)) {
+			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+				 __func__, p->addr, p->sleep_val, p->wake_val);
+			continue;
+		}
+		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+		if (ret)
+			return ret;
+		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
+				  p->addr, p->wake_val);
+		if (ret)
+			return ret;
+	}
+
+	ctrlr->dirty = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
+
+/**
+ * rpmh_invalidate: Invalidate all sleep and wake sets
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and active values in the TCS blocks.
+ */
+int rpmh_invalidate(const struct device *dev)
+{
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	int ret;
+
+	invalidate_batch(ctrlr);
+	ctrlr->dirty = true;
+
+	do {
+		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmh_invalidate);
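+
+/*
+ * Typical call flow for the batch interface above, as a rough sketch only
+ * (a hypothetical consumer; the addresses and data values are placeholders,
+ * not real RPMH resource addresses):
+ *
+ *	static int example_set_sleep_votes(struct device *dev)
+ *	{
+ *		struct tcs_cmd cmd[] = {
+ *			{ .addr = 0x50000, .data = 1 },
+ *			{ .addr = 0x50004, .data = 0 },
+ *		};
+ *		u32 n[] = { ARRAY_SIZE(cmd), 0 };
+ *		int ret;
+ *
+ *		ret = rpmh_invalidate(dev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmd, n);
+ *	}
+ *
+ * The cached sleep/wake data is then pushed to the hardware by rpmh_flush(),
+ * which the platform PM code calls from the last CPU going down.
+ */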
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 0000000..93517ed
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT     (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel:	reference to the smd channel
+ * @dev:		rpm device
+ * @ack:		completion for acks
+ * @lock:		mutual exclusion around the send/complete pair
+ * @ack_status:		result of the rpm request
+ */
+struct qcom_smd_rpm {
+	struct rpmsg_endpoint *rpm_channel;
+	struct device *dev;
+
+	struct completion ack;
+	struct mutex lock;
+	int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type:	identifier of the service
+ * @length:		length of the payload
+ */
+struct qcom_rpm_header {
+	__le32 service_type;
+	__le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id:	identifier of the outgoing message
+ * @flags:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource id
+ * @data_len:	length of the payload following this header
+ */
+struct qcom_rpm_request {
+	__le32 msg_id;
+	__le32 flags;
+	__le32 type;
+	__le32 id;
+	__le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type:	indicator of the type of message
+ * @length:	the size of this message, including the message header
+ * @msg_id:	message id
+ * @message:	textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+	__le32 msg_type;
+	__le32 length;
+	union {
+		__le32 msg_id;
+		u8 message[0];
+	};
+};
+
+#define RPM_SERVICE_TYPE_REQUEST	0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR		0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID		0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm:	rpm handle
+ * @state:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource identifier
+ * @buf:	the data to be written
+ * @count:	number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+		       int state,
+		       u32 type, u32 id,
+		       void *buf,
+		       size_t count)
+{
+	static unsigned msg_id = 1;
+	int left;
+	int ret;
+	struct {
+		struct qcom_rpm_header hdr;
+		struct qcom_rpm_request req;
+		u8 payload[];
+	} *pkt;
+	size_t size = sizeof(*pkt) + count;
+
+	/* SMD packets to the RPM may not exceed 256 bytes */
+	if (WARN_ON(size >= 256))
+		return -EINVAL;
+
+	pkt = kmalloc(size, GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	mutex_lock(&rpm->lock);
+
+	pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+	pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+	pkt->req.msg_id = cpu_to_le32(msg_id++);
+	pkt->req.flags = cpu_to_le32(state);
+	pkt->req.type = cpu_to_le32(type);
+	pkt->req.id = cpu_to_le32(id);
+	pkt->req.data_len = cpu_to_le32(count);
+	memcpy(pkt->payload, buf, count);
+
+	ret = rpmsg_send(rpm->rpm_channel, pkt, size);
+	if (ret)
+		goto out;
+
+	left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+	if (!left)
+		ret = -ETIMEDOUT;
+	else
+		ret = rpm->ack_status;
+
+out:
+	kfree(pkt);
+	mutex_unlock(&rpm->lock);
+	return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
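+
+/*
+ * Rough usage sketch (a hypothetical client; the request layout, resource
+ * type and id below are placeholders - real clients use the key/type/id
+ * values defined for their particular RPM resource):
+ *
+ *	struct example_req {
+ *		__le32 key;
+ *		__le32 nbytes;
+ *		__le32 value;
+ *	};
+ *
+ *	static int example_enable(struct qcom_smd_rpm *rpm)
+ *	{
+ *		struct example_req req = {
+ *			.key = cpu_to_le32(0x1),
+ *			.nbytes = cpu_to_le32(sizeof(u32)),
+ *			.value = cpu_to_le32(1),
+ *		};
+ *
+ *		return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ *					  0x1234, 0, &req, sizeof(req));
+ *	}
+ */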
+
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+				 void *data,
+				 int count,
+				 void *priv,
+				 u32 addr)
+{
+	const struct qcom_rpm_header *hdr = data;
+	size_t hdr_length = le32_to_cpu(hdr->length);
+	const struct qcom_rpm_message *msg;
+	struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+	const u8 *buf = data + sizeof(struct qcom_rpm_header);
+	const u8 *end = buf + hdr_length;
+	char msgbuf[32];
+	int status = 0;
+	u32 len, msg_length;
+
+	if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+	    hdr_length < sizeof(struct qcom_rpm_message)) {
+		dev_err(rpm->dev, "invalid request\n");
+		return 0;
+	}
+
+	while (buf < end) {
+		msg = (struct qcom_rpm_message *)buf;
+		msg_length = le32_to_cpu(msg->length);
+		switch (le32_to_cpu(msg->msg_type)) {
+		case RPM_MSG_TYPE_MSG_ID:
+			break;
+		case RPM_MSG_TYPE_ERR:
+			len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+			memcpy_fromio(msgbuf, msg->message, len);
+			msgbuf[len - 1] = 0;
+
+			if (!strcmp(msgbuf, "resource does not exist"))
+				status = -ENXIO;
+			else
+				status = -EINVAL;
+			break;
+		}
+
+		buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+	}
+
+	rpm->ack_status = status;
+	complete(&rpm->ack);
+	return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+	struct qcom_smd_rpm *rpm;
+
+	rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm)
+		return -ENOMEM;
+
+	mutex_init(&rpm->lock);
+	init_completion(&rpm->ack);
+
+	rpm->dev = &rpdev->dev;
+	rpm->rpm_channel = rpdev->ept;
+	dev_set_drvdata(&rpdev->dev, rpm);
+
+	return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+	of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+	{ .compatible = "qcom,rpm-apq8084" },
+	{ .compatible = "qcom,rpm-msm8916" },
+	{ .compatible = "qcom,rpm-msm8974" },
+	{ .compatible = "qcom,rpm-msm8996" },
+	{ .compatible = "qcom,rpm-msm8998" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+	.probe = qcom_smd_rpm_probe,
+	.remove = qcom_smd_rpm_remove,
+	.callback = qcom_smd_rpm_callback,
+	.drv  = {
+		.name  = "qcom_smd_rpm",
+		.of_match_table = qcom_smd_rpm_of_match,
+	},
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+	return register_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+	unregister_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 0000000..bf4bd71
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains metadata for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
+ *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ */
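+
+/*
+ * To illustrate the layout described above (the numbers are made up): in a
+ * 4MB main region the partition table sits at offset 0x3ff000, i.e. 4kB from
+ * the end. Within a partition, uncached items grow upwards from the start
+ * while cached items grow downwards from the end, so the bytes still
+ * available for allocation at any point can be derived from the two free
+ * offsets in the partition header:
+ *
+ *	free = le32_to_cpu(phdr->offset_free_cached) -
+ *	       le32_to_cpu(phdr->offset_free_uncached);
+ */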
+
+/*
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
+ */
+#define SMEM_MASTER_SBL_VERSION_INDEX	7
+#define SMEM_GLOBAL_HEAP_VERSION	11
+#define SMEM_GLOBAL_PART_VERSION	12
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED	8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT		512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS		0
+
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST	0xfffe
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT		10
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command:	current command to be executed
+ * @status:	status of the currently requested command
+ * @params:	parameters to the command
+ */
+struct smem_proc_comm {
+	__le32 command;
+	__le32 status;
+	__le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated:	boolean to indicate if this entry is used
+ * @offset:	offset to the allocated space
+ * @size:	size of the allocated space, 8 byte aligned
+ * @aux_base:	base address for the memory region used by this unit, or 0 for
+ *		the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+	__le32 allocated;
+	__le32 offset;
+	__le32 size;
+	__le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK		0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm:		proc_comm communication interface (legacy)
+ * @version:		array of versions for the various subsystems
+ * @initialized:	boolean to indicate that smem is initialized
+ * @free_offset:	index of the first unallocated byte in smem
+ * @available:		number of bytes available for allocation
+ * @reserved:		reserved field, must be 0
+ * @toc:		array of references to items
+ */
+struct smem_header {
+	struct smem_proc_comm proc_comm[4];
+	__le32 version[32];
+	__le32 initialized;
+	__le32 free_offset;
+	__le32 available;
+	__le32 reserved;
+	struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset:	offset, within the main shared memory region, of the partition
+ * @size:	size of the partition
+ * @flags:	flags for the partition (currently unused)
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @cacheline:	alignment for "cached" entries
+ * @reserved:	reserved entries for later use
+ */
+struct smem_ptable_entry {
+	__le32 offset;
+	__le32 size;
+	__le32 flags;
+	__le16 host0;
+	__le16 host1;
+	__le32 cacheline;
+	__le32 reserved[7];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic:	magic number, must be SMEM_PTABLE_MAGIC
+ * @version:	version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved:	for now reserved entries
+ * @entry:	list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+	u8 magic[4];
+	__le32 version;
+	__le32 num_entries;
+	__le32 reserved[5];
+	struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic:	magic number, must be SMEM_PART_MAGIC
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @size:	size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ *		this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ *		partition
+ * @reserved:	for now reserved entries
+ */
+struct smem_partition_header {
+	u8 magic[4];
+	__le16 host0;
+	__le16 host1;
+	__le32 size;
+	__le32 offset_free_uncached;
+	__le32 offset_free_cached;
+	__le32 reserved[3];
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary:	magic number, must be SMEM_PRIVATE_CANARY
+ * @item:	identifying number of the smem item
+ * @size:	size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved:	for now reserved entry
+ */
+struct smem_private_entry {
+	u16 canary; /* bytes are the same so no swapping needed */
+	__le16 item;
+	__le32 size; /* includes padding bytes */
+	__le16 padding_data;
+	__le16 padding_hdr;
+	__le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY	0xa5a5
+
+/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic:	magic number, must be SMEM_INFO_MAGIC
+ * @size:	size of the smem region
+ * @base_addr:	base address of the smem region
+ * @reserved:	for now reserved entry
+ * @num_items:	highest accepted item number
+ */
+struct smem_info {
+	u8 magic[4];
+	__le32 size;
+	__le32 base_addr;
+	__le32 reserved;
+	__le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base:	identifier of aux_mem base
+ * @virt_base:	virtual base address of memory with this aux_mem identifier
+ * @size:	size of the memory region
+ */
+struct smem_region {
+	u32 aux_base;
+	void __iomem *virt_base;
+	size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev:	device pointer
+ * @hwlock:	reference to a hwspinlock
+ * @global_partition:	pointer to global partition when in use
+ * @global_cacheline:	cacheline size for global partition
+ * @partitions:	list of pointers to partitions affecting the current
+ *		processor/host
+ * @cacheline:	list of cacheline sizes for each host
+ * @item_count: max accepted item number
+ * @num_regions: number of @regions
+ * @regions:	list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+	struct device *dev;
+
+	struct hwspinlock *hwlock;
+
+	struct smem_partition_header *global_partition;
+	size_t global_cacheline;
+	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+	size_t cacheline[SMEM_HOST_COUNT];
+	u32 item_count;
+
+	unsigned num_regions;
+	struct smem_region regions[0];
+};
+
+static void *
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+					size_t cacheline)
+{
+	void *p = phdr;
+	struct smem_private_entry *e;
+
+	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *
+phdr_to_last_cached_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
+{
+	void *p = phdr;
+
+	return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+uncached_entry_next(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+	       le32_to_cpu(e->size);
+}
+
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size);
+}
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT	1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+				   struct smem_partition_header *phdr,
+				   unsigned item,
+				   size_t size)
+{
+	struct smem_private_entry *hdr, *end;
+	size_t alloc_size;
+	void *cached;
+
+	hdr = phdr_to_first_uncached_entry(phdr);
+	end = phdr_to_last_uncached_entry(phdr);
+	cached = phdr_to_last_cached_entry(phdr);
+
+	while (hdr < end) {
+		if (hdr->canary != SMEM_PRIVATE_CANARY)
+			goto bad_canary;
+		if (le16_to_cpu(hdr->item) == item)
+			return -EEXIST;
+
+		hdr = uncached_entry_next(hdr);
+	}
+
+	/* Check that we don't grow into the cached region */
+	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+	if ((void *)hdr + alloc_size > cached) {
+		dev_err(smem->dev, "Out of memory\n");
+		return -ENOSPC;
+	}
+
+	hdr->canary = SMEM_PRIVATE_CANARY;
+	hdr->item = cpu_to_le16(item);
+	hdr->size = cpu_to_le32(ALIGN(size, 8));
+	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+	hdr->padding_hdr = 0;
+
+	/*
+	 * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+	 */
+	wmb();
+	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+	return 0;
+bad_canary:
+	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+	return -EINVAL;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+				  unsigned item,
+				  size_t size)
+{
+	struct smem_global_entry *entry;
+	struct smem_header *header;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (entry->allocated)
+		return -EEXIST;
+
+	size = ALIGN(size, 8);
+	if (WARN_ON(size > le32_to_cpu(header->available)))
+		return -ENOMEM;
+
+	entry->offset = header->free_offset;
+	entry->size = cpu_to_le32(size);
+
+	/*
+	 * Ensure the header is consistent before we mark the item allocated,
+	 * so that remote processors will get a consistent view of the item
+	 * even though they do not take the spinlock on read.
+	 */
+	wmb();
+	entry->allocated = cpu_to_le32(1);
+
+	le32_add_cpu(&header->free_offset, size);
+	le32_add_cpu(&header->available, -size);
+
+	return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host:	remote processor id, or -1
+ * @item:	smem item handle
+ * @size:	number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+	struct smem_partition_header *phdr;
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (item < SMEM_ITEM_LAST_FIXED) {
+		dev_err(__smem->dev,
+			"Rejecting allocation of static entry %d\n", item);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(item >= __smem->item_count))
+		return -EINVAL;
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ret;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+	} else {
+		ret = qcom_smem_alloc_global(__smem, item, size);
+	}
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+				  unsigned item,
+				  size_t *size)
+{
+	struct smem_header *header;
+	struct smem_region *area;
+	struct smem_global_entry *entry;
+	u32 aux_base;
+	unsigned i;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (!entry->allocated)
+		return ERR_PTR(-ENXIO);
+
+	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+	for (i = 0; i < smem->num_regions; i++) {
+		area = &smem->regions[i];
+
+		if (area->aux_base == aux_base || !aux_base) {
+			if (size != NULL)
+				*size = le32_to_cpu(entry->size);
+			return area->virt_base + le32_to_cpu(entry->offset);
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+				   struct smem_partition_header *phdr,
+				   size_t cacheline,
+				   unsigned item,
+				   size_t *size)
+{
+	struct smem_private_entry *e, *end;
+
+	e = phdr_to_first_uncached_entry(phdr);
+	end = phdr_to_last_uncached_entry(phdr);
+
+	while (e < end) {
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;
+
+		if (le16_to_cpu(e->item) == item) {
+			if (size != NULL)
+				*size = le32_to_cpu(e->size) -
+					le16_to_cpu(e->padding_data);
+
+			return uncached_entry_to_item(e);
+		}
+
+		e = uncached_entry_next(e);
+	}
+
+	/* Item was not found in the uncached list, search the cached list */
+
+	e = phdr_to_first_cached_entry(phdr, cacheline);
+	end = phdr_to_last_cached_entry(phdr);
+
+	while (e > end) {
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;
+
+		if (le16_to_cpu(e->item) == item) {
+			if (size != NULL)
+				*size = le32_to_cpu(e->size) -
+					le16_to_cpu(e->padding_data);
+
+			return cached_entry_to_item(e);
+		}
+
+		e = cached_entry_next(e, cacheline);
+	}
+
+	return ERR_PTR(-ENOENT);
+
+invalid_canary:
+	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * qcom_smem_get() - resolve ptr and size of a smem item
+ * @host:	the remote processor, or -1
+ * @item:	smem item handle
+ * @size:	pointer to be filled out with size of the item
+ *
+ * Looks up smem item and returns pointer to it. Size of smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+	struct smem_partition_header *phdr;
+	unsigned long flags;
+	size_t cacheln;
+	int ret;
+	void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+	if (!__smem)
+		return ptr;
+
+	if (WARN_ON(item >= __smem->item_count))
+		return ERR_PTR(-EINVAL);
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		cacheln = __smem->cacheline[host];
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		cacheln = __smem->global_cacheline;
+		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+	} else {
+		ptr = qcom_smem_get_global(__smem, item, size);
+	}
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ptr;
+
+}
+EXPORT_SYMBOL(qcom_smem_get);
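+
+/*
+ * Sketch of the usual allocate-then-get pattern (a hypothetical client; the
+ * item number EXAMPLE_SMEM_ITEM and struct example_shared are placeholders,
+ * real clients use the well-known item ids of their protocol):
+ *
+ *	struct example_shared *shared;
+ *	size_t size;
+ *	int ret;
+ *
+ *	ret = qcom_smem_alloc(remote_pid, EXAMPLE_SMEM_ITEM, sizeof(*shared));
+ *	if (ret < 0 && ret != -EEXIST)
+ *		return ret;
+ *
+ *	shared = qcom_smem_get(remote_pid, EXAMPLE_SMEM_ITEM, &size);
+ *	if (IS_ERR(shared))
+ *		return PTR_ERR(shared);
+ *
+ * -EEXIST from qcom_smem_alloc() is not an error here, it only means the
+ * item has already been allocated by either side.
+ */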
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host:	the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+	struct smem_partition_header *phdr;
+	struct smem_header *header;
+	unsigned ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = le32_to_cpu(phdr->offset_free_cached) -
+		      le32_to_cpu(phdr->offset_free_uncached);
+	} else if (__smem->global_partition) {
+		phdr = __smem->global_partition;
+		ret = le32_to_cpu(phdr->offset_free_cached) -
+		      le32_to_cpu(phdr->offset_free_uncached);
+	} else {
+		header = __smem->regions[0].virt_base;
+		ret = le32_to_cpu(header->available);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+/**
+ * qcom_smem_virt_to_phys() - return the physical address associated
+ * with an smem item pointer (previously returned by qcom_smem_get())
+ * @p:	the virtual address to convert
+ *
+ * Returns 0 if the pointer provided is not within any smem region.
+ */
+phys_addr_t qcom_smem_virt_to_phys(void *p)
+{
+	unsigned i;
+
+	for (i = 0; i < __smem->num_regions; i++) {
+		struct smem_region *region = &__smem->regions[i];
+
+		if (p < region->virt_base)
+			continue;
+		if (p < region->virt_base + region->size) {
+			u64 offset = p - region->virt_base;
+
+			return (phys_addr_t)region->aux_base + offset;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qcom_smem_virt_to_phys);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+	struct smem_header *header;
+	__le32 *versions;
+
+	header = smem->regions[0].virt_base;
+	versions = header->version;
+
+	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
+{
+	struct smem_ptable *ptable;
+	u32 version;
+
+	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+		return ERR_PTR(-ENOENT);
+
+	version = le32_to_cpu(ptable->version);
+	if (version != 1) {
+		dev_err(smem->dev,
+			"Unsupported partition header version %d\n", version);
+		return ERR_PTR(-EINVAL);
+	}
+	return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+	struct smem_ptable *ptable;
+	struct smem_info *info;
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR_OR_NULL(ptable))
+		return SMEM_ITEM_COUNT;
+
+	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+		return SMEM_ITEM_COUNT;
+
+	return le16_to_cpu(info->num_items);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	u32 host0, host1, size;
+	bool found = false;
+	int i;
+
+	if (smem->global_partition) {
+		dev_err(smem->dev, "Already found the global partition\n");
+		return -EINVAL;
+	}
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR(ptable))
+		return PTR_ERR(ptable);
+
+	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+		entry = &ptable->entry[i];
+		host0 = le16_to_cpu(entry->host0);
+		host1 = le16_to_cpu(entry->host1);
+
+		if (host0 == SMEM_GLOBAL_HOST && host0 == host1) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		dev_err(smem->dev, "Missing entry for global partition\n");
+		return -EINVAL;
+	}
+
+	if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+		dev_err(smem->dev, "Invalid entry for global partition\n");
+		return -EINVAL;
+	}
+
+	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+	host0 = le16_to_cpu(header->host0);
+	host1 = le16_to_cpu(header->host1);
+
+	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+		dev_err(smem->dev, "Global partition has invalid magic\n");
+		return -EINVAL;
+	}
+
+	if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+		dev_err(smem->dev, "Global partition hosts are invalid\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+		dev_err(smem->dev, "Global partition has invalid size\n");
+		return -EINVAL;
+	}
+
+	size = le32_to_cpu(header->offset_free_uncached);
+	if (size > le32_to_cpu(header->size)) {
+		dev_err(smem->dev,
+			"Global partition has invalid free pointer\n");
+		return -EINVAL;
+	}
+
+	smem->global_partition = header;
+	smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+	return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+					  unsigned int local_host)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	unsigned int remote_host;
+	u32 host0, host1;
+	int i;
+
+	ptable = qcom_smem_get_ptable(smem);
+	if (IS_ERR(ptable))
+		return PTR_ERR(ptable);
+
+	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+		entry = &ptable->entry[i];
+		host0 = le16_to_cpu(entry->host0);
+		host1 = le16_to_cpu(entry->host1);
+
+		if (host0 != local_host && host1 != local_host)
+			continue;
+
+		if (!le32_to_cpu(entry->offset))
+			continue;
+
+		if (!le32_to_cpu(entry->size))
+			continue;
+
+		if (host0 == local_host)
+			remote_host = host1;
+		else
+			remote_host = host0;
+
+		if (remote_host >= SMEM_HOST_COUNT) {
+			dev_err(smem->dev,
+				"Invalid remote host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		if (smem->partitions[remote_host]) {
+			dev_err(smem->dev,
+				"Already found a partition for host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+		host0 = le16_to_cpu(header->host0);
+		host1 = le16_to_cpu(header->host1);
+
+		if (memcmp(header->magic, SMEM_PART_MAGIC,
+			    sizeof(header->magic))) {
+			dev_err(smem->dev,
+				"Partition %d has invalid magic\n", i);
+			return -EINVAL;
+		}
+
+		if (host0 != local_host && host1 != local_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (host0 != remote_host && host1 != remote_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+			dev_err(smem->dev,
+				"Partition %d has invalid size\n", i);
+			return -EINVAL;
+		}
+
+		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
+			dev_err(smem->dev,
+				"Partition %d has invalid free pointer\n", i);
+			return -EINVAL;
+		}
+
+		smem->partitions[remote_host] = header;
+		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
+	}
+
+	return 0;
+}
+
+static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
+				const char *name, int i)
+{
+	struct device_node *np;
+	struct resource r;
+	int ret;
+
+	np = of_parse_phandle(dev->of_node, name, 0);
+	if (!np) {
+		dev_err(dev, "No %s specified\n", name);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	smem->regions[i].aux_base = (u32)r.start;
+	smem->regions[i].size = resource_size(&r);
+	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
+	if (!smem->regions[i].virt_base)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+	struct smem_header *header;
+	struct qcom_smem *smem;
+	size_t array_size;
+	int num_regions;
+	int hwlock_id;
+	u32 version;
+	int ret;
+
+	num_regions = 1;
+	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+		num_regions++;
+
+	array_size = num_regions * sizeof(struct smem_region);
+	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+	if (!smem)
+		return -ENOMEM;
+
+	smem->dev = &pdev->dev;
+	smem->num_regions = num_regions;
+
+	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
+	if (ret)
+		return ret;
+
+	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
+					"qcom,rpm-msg-ram", 1)))
+		return ret;
+
+	header = smem->regions[0].virt_base;
+	if (le32_to_cpu(header->initialized) != 1 ||
+	    le32_to_cpu(header->reserved)) {
+		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+		return -EINVAL;
+	}
+
+	version = qcom_smem_get_sbl_version(smem);
+	switch (version >> 16) {
+	case SMEM_GLOBAL_PART_VERSION:
+		ret = qcom_smem_set_global_partition(smem);
+		if (ret < 0)
+			return ret;
+		smem->item_count = qcom_smem_get_item_count(smem);
+		break;
+	case SMEM_GLOBAL_HEAP_VERSION:
+		smem->item_count = SMEM_ITEM_COUNT;
+		break;
+	default:
+		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+		return -EINVAL;
+	}
+
+	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+	if (ret < 0 && ret != -ENOENT)
+		return ret;
+
+	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+	if (hwlock_id < 0) {
+		if (hwlock_id != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		return hwlock_id;
+	}
+
+	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!smem->hwlock)
+		return -ENXIO;
+
+	__smem = smem;
+
+	return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+	hwspin_lock_free(__smem->hwlock);
+	__smem = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+	{ .compatible = "qcom,smem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+	.probe = qcom_smem_probe,
+	.remove = qcom_smem_remove,
+	.driver  = {
+		.name = "qcom-smem",
+		.of_match_table = qcom_smem_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init qcom_smem_init(void)
+{
+	return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+	platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
new file mode 100644
index 0000000..d5437ca
--- /dev/null
+++ b/drivers/soc/qcom/smem_state.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static LIST_HEAD(smem_states);
+static DEFINE_MUTEX(list_lock);
+
+/**
+ * struct qcom_smem_state - state context
+ * @refcount:	refcount for the state
+ * @orphan:	boolean indicator that this state has been unregistered
+ * @list:	entry in smem_states list
+ * @of_node:	of_node to use for matching the state in DT
+ * @priv:	implementation private data
+ * @ops:	ops for the state
+ */
+struct qcom_smem_state {
+	struct kref refcount;
+	bool orphan;
+
+	struct list_head list;
+	struct device_node *of_node;
+
+	void *priv;
+
+	struct qcom_smem_state_ops ops;
+};
+
+/**
+ * qcom_smem_state_update_bits() - update the masked bits in state with value
+ * @state:	state handle acquired by calling qcom_smem_state_get()
+ * @mask:	bit mask for the change
+ * @value:	new value for the masked bits
+ *
+ * Returns 0 on success, otherwise negative errno.
+ */
+int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+				u32 mask,
+				u32 value)
+{
+	if (state->orphan)
+		return -ENXIO;
+
+	if (!state->ops.update_bits)
+		return -ENOTSUPP;
+
+	return state->ops.update_bits(state->priv, mask, value);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits);
+
+static struct qcom_smem_state *of_node_to_state(struct device_node *np)
+{
+	struct qcom_smem_state *state;
+
+	mutex_lock(&list_lock);
+
+	list_for_each_entry(state, &smem_states, list) {
+		if (state->of_node == np) {
+			kref_get(&state->refcount);
+			goto unlock;
+		}
+	}
+	state = ERR_PTR(-EPROBE_DEFER);
+
+unlock:
+	mutex_unlock(&list_lock);
+
+	return state;
+}
+
+/**
+ * qcom_smem_state_get() - acquire handle to a state
+ * @dev:	client device pointer
+ * @con_id:	name of the state to lookup
+ * @bit:	flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be
+ * called to release the returned state handle.
+ */
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+					    const char *con_id,
+					    unsigned *bit)
+{
+	struct qcom_smem_state *state;
+	struct of_phandle_args args;
+	int index = 0;
+	int ret;
+
+	if (con_id) {
+		index = of_property_match_string(dev->of_node,
+						 "qcom,smem-state-names",
+						 con_id);
+		if (index < 0) {
+			dev_err(dev, "missing qcom,smem-state-names\n");
+			return ERR_PTR(index);
+		}
+	}
+
+	ret = of_parse_phandle_with_args(dev->of_node,
+					 "qcom,smem-states",
+					 "#qcom,smem-state-cells",
+					 index,
+					 &args);
+	if (ret) {
+		dev_err(dev, "failed to parse qcom,smem-states property\n");
+		return ERR_PTR(ret);
+	}
+
+	if (args.args_count != 1) {
+		dev_err(dev, "invalid #qcom,smem-state-cells\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	state = of_node_to_state(args.np);
+	if (IS_ERR(state))
+		goto put;
+
+	*bit = args.args[0];
+
+put:
+	of_node_put(args.np);
+	return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_get);
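+
+/*
+ * Sketch of a typical consumer (hypothetical; the "stop" con_id is only an
+ * example and must match an entry in the client's qcom,smem-state-names
+ * property):
+ *
+ *	struct qcom_smem_state *state;
+ *	unsigned int bit;
+ *	int ret;
+ *
+ *	state = qcom_smem_state_get(dev, "stop", &bit);
+ *	if (IS_ERR(state))
+ *		return PTR_ERR(state);
+ *
+ *	ret = qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
+ *	...
+ *	qcom_smem_state_put(state);
+ */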
+
+static void qcom_smem_state_release(struct kref *ref)
+{
+	struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
+
+	list_del(&state->list);
+	kfree(state);
+}
+
+/**
+ * qcom_smem_state_put() - release state handle
+ * @state:	state handle to be released
+ */
+void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+	mutex_lock(&list_lock);
+	kref_put(&state->refcount, qcom_smem_state_release);
+	mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+
+/**
+ * qcom_smem_state_register() - register a new state
+ * @of_node:	of_node used for matching client lookups
+ * @ops:	implementation ops
+ * @priv:	implementation specific private data
+ */
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+						 const struct qcom_smem_state_ops *ops,
+						 void *priv)
+{
+	struct qcom_smem_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&state->refcount);
+
+	state->of_node = of_node;
+	state->ops = *ops;
+	state->priv = priv;
+
+	mutex_lock(&list_lock);
+	list_add(&state->list, &smem_states);
+	mutex_unlock(&list_lock);
+
+	return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_register);
+
+/**
+ * qcom_smem_state_unregister() - unregister a registered state
+ * @state:	state handle to be unregistered
+ */
+void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+	state->orphan = true;
+	qcom_smem_state_put(state);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_unregister);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
new file mode 100644
index 0000000..c22503c
--- /dev/null
+++ b/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,606 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors.  Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor.  By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver exposes an smem state handle for each outbound entry and a
+ * virtual interrupt controller for each inbound entry.
+ */
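+
+/*
+ * A remote (inbound) entry is typically consumed by describing the entry as
+ * an interrupt controller in the device tree and requesting one of its 32
+ * bits as an ordinary interrupt. A rough sketch, with hypothetical names:
+ *
+ *	irq = of_irq_get_byname(pdev->dev.of_node, "fatal");
+ *	if (irq < 0)
+ *		return irq;
+ *
+ *	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ *					example_fatal_handler, IRQF_ONESHOT,
+ *					"example-fatal", drvdata);
+ *
+ * Outbound entries are driven through the qcom_smem_state interface that
+ * this driver registers for each outgoing entry.
+ */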
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+#define SMP2P_MAGIC 0x504d5324
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic:		magic number
+ * @version:		version - must be 1
+ * @features:		features flag - currently unused
+ * @local_pid:		processor id of sending end
+ * @remote_pid:		processor id of receiving end
+ * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries:	number of allocated entries
+ * @flags:		currently unused
+ * @entries:		individual communication entries
+ *     @name:		name of the entry
+ *     @value:		content of the entry
+ */
+struct smp2p_smem_item {
+	u32 magic;
+	u8 version;
+	unsigned features:24;
+	u16 local_pid;
+	u16 remote_pid;
+	u16 total_entries;
+	u16 valid_entries;
+	u32 flags;
+
+	struct {
+		u8 name[SMP2P_MAX_ENTRY_NAME];
+		u32 value;
+	} entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node:	list entry to keep track of allocated entries
+ * @smp2p:	reference to the device driver context
+ * @name:	name of the entry, to match against smp2p_smem_item
+ * @value:	pointer to smp2p_smem_item entry value
+ * @last_value:	last handled value
+ * @domain:	irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising:	bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @state:	smem state handle
+ * @lock:	spinlock to protect read-modify-write of the value
+ */
+struct smp2p_entry {
+	struct list_head node;
+	struct qcom_smp2p *smp2p;
+
+	const char *name;
+	u32 *value;
+	u32 last_value;
+
+	struct irq_domain *domain;
+	DECLARE_BITMAP(irq_enabled, 32);
+	DECLARE_BITMAP(irq_rising, 32);
+	DECLARE_BITMAP(irq_falling, 32);
+
+	struct qcom_smem_state *state;
+
+	spinlock_t lock;
+};
+
+#define SMP2P_INBOUND	0
+#define SMP2P_OUTBOUND	1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev:	device driver handle
+ * @in:		pointer to the inbound smem item
+ * @out:	pointer to the outbound smem item
+ * @smem_items:	ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @local_pid:	processor id of the inbound edge
+ * @remote_pid:	processor id of the outbound edge
+ * @ipc_regmap:	regmap for the outbound ipc
+ * @ipc_offset:	offset within the regmap
+ * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
+ * @mbox_client: mailbox client handle
+ * @mbox_chan:	apcs ipc mailbox channel handle
+ * @inbound:	list of inbound entries
+ * @outbound:	list of outbound entries
+ */
+struct qcom_smp2p {
+	struct device *dev;
+
+	struct smp2p_smem_item *in;
+	struct smp2p_smem_item *out;
+
+	unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+	unsigned valid_entries;
+
+	unsigned local_pid;
+	unsigned remote_pid;
+
+	struct regmap *ipc_regmap;
+	int ipc_offset;
+	int ipc_bit;
+
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox_chan;
+
+	struct list_head inbound;
+	struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+	/* Make sure any updated data is written before the kick */
+	wmb();
+
+	if (smp2p->mbox_chan) {
+		mbox_send_message(smp2p->mbox_chan, NULL);
+		mbox_client_txdone(smp2p->mbox_chan, 0);
+	} else {
+		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+	}
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq:	unused
+ * @data:	smp2p driver context
+ *
+ * Handle notifications from the remote side to handle newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+	struct smp2p_smem_item *in;
+	struct smp2p_entry *entry;
+	struct qcom_smp2p *smp2p = data;
+	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
+	unsigned pid = smp2p->remote_pid;
+	size_t size;
+	int irq_pin;
+	u32 status;
+	char buf[SMP2P_MAX_ENTRY_NAME];
+	u32 val;
+	int i;
+
+	in = smp2p->in;
+
+	/* Acquire smem item, if not already found */
+	if (!in) {
+		in = qcom_smem_get(pid, smem_id, &size);
+		if (IS_ERR(in)) {
+			dev_err(smp2p->dev,
+				"Unable to acquire remote smp2p item\n");
+			return IRQ_HANDLED;
+		}
+
+		smp2p->in = in;
+	}
+
+	/* Match newly created entries */
+	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+		list_for_each_entry(entry, &smp2p->inbound, node) {
+			memcpy(buf, in->entries[i].name, sizeof(buf));
+			if (!strcmp(buf, entry->name)) {
+				entry->value = &in->entries[i].value;
+				break;
+			}
+		}
+	}
+	smp2p->valid_entries = i;
+
+	/* Fire interrupts based on any value changes */
+	list_for_each_entry(entry, &smp2p->inbound, node) {
+		/* Ignore entries not yet allocated by the remote side */
+		if (!entry->value)
+			continue;
+
+		val = readl(entry->value);
+
+		status = val ^ entry->last_value;
+		entry->last_value = val;
+
+		/* No changes of this entry? */
+		if (!status)
+			continue;
+
+		for_each_set_bit(i, entry->irq_enabled, 32) {
+			if (!(status & BIT(i)))
+				continue;
+
+			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
+			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	if (!(type & IRQ_TYPE_EDGE_BOTH))
+		return -EINVAL;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		set_bit(irq, entry->irq_rising);
+	else
+		clear_bit(irq, entry->irq_rising);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		set_bit(irq, entry->irq_falling);
+	else
+		clear_bit(irq, entry->irq_falling);
+
+	return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+	.name           = "smp2p",
+	.irq_mask       = smp2p_mask_irq,
+	.irq_unmask     = smp2p_unmask_irq,
+	.irq_set_type	= smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+			 unsigned int irq,
+			 irq_hw_number_t hw)
+{
+	struct smp2p_entry *entry = d->host_data;
+
+	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, entry);
+	irq_set_nested_thread(irq, 1);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+	.map = smp2p_irq_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+				    struct smp2p_entry *entry,
+				    struct device_node *node)
+{
+	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+	if (!entry->domain) {
+		dev_err(smp2p->dev, "failed to add irq_domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int smp2p_update_bits(void *data, u32 mask, u32 value)
+{
+	struct smp2p_entry *entry = data;
+	u32 orig;
+	u32 val;
+
+	spin_lock(&entry->lock);
+	val = orig = readl(entry->value);
+	val &= ~mask;
+	val |= value;
+	writel(val, entry->value);
+	spin_unlock(&entry->lock);
+
+	if (val != orig)
+		qcom_smp2p_kick(entry->smp2p);
+
+	return 0;
+}
+
+static const struct qcom_smem_state_ops smp2p_state_ops = {
+	.update_bits = smp2p_update_bits,
+};
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+				     struct smp2p_entry *entry,
+				     struct device_node *node)
+{
+	struct smp2p_smem_item *out = smp2p->out;
+	char buf[SMP2P_MAX_ENTRY_NAME] = {};
+
+	/* Allocate an entry from the smem item */
+	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+
+	/* Make the logical entry reference the physical value */
+	entry->value = &out->entries[out->valid_entries].value;
+
+	out->valid_entries++;
+
+	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+	if (IS_ERR(entry->state)) {
+		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+		return PTR_ERR(entry->state);
+	}
+
+	return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+	struct smp2p_smem_item *out;
+	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+	unsigned pid = smp2p->remote_pid;
+	int ret;
+
+	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+	if (ret < 0 && ret != -EEXIST) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(smp2p->dev,
+				"unable to allocate local smp2p item\n");
+		return ret;
+	}
+
+	out = qcom_smem_get(pid, smem_id, NULL);
+	if (IS_ERR(out)) {
+		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+		return PTR_ERR(out);
+	}
+
+	memset(out, 0, sizeof(*out));
+	out->magic = SMP2P_MAGIC;
+	out->local_pid = smp2p->local_pid;
+	out->remote_pid = smp2p->remote_pid;
+	out->total_entries = SMP2P_MAX_ENTRY;
+	out->valid_entries = 0;
+
+	/*
+	 * Make sure the rest of the header is written before we validate the
+	 * item by writing a valid version number.
+	 */
+	wmb();
+	out->version = 1;
+
+	qcom_smp2p_kick(smp2p);
+
+	smp2p->out = out;
+
+	return 0;
+}
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+	struct device_node *syscon;
+	struct device *dev = smp2p->dev;
+	const char *key;
+	int ret;
+
+	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+	if (!syscon) {
+		dev_err(dev, "no qcom,ipc node\n");
+		return -ENODEV;
+	}
+
+	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+	if (IS_ERR(smp2p->ipc_regmap))
+		return PTR_ERR(smp2p->ipc_regmap);
+
+	key = "qcom,ipc";
+	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+	if (ret < 0) {
+		dev_err(dev, "no offset in %s\n", key);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+	if (ret < 0) {
+		dev_err(dev, "no bit in %s\n", key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+	struct smp2p_entry *entry;
+	struct device_node *node;
+	struct qcom_smp2p *smp2p;
+	const char *key;
+	int irq;
+	int ret;
+
+	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+	if (!smp2p)
+		return -ENOMEM;
+
+	smp2p->dev = &pdev->dev;
+	INIT_LIST_HEAD(&smp2p->inbound);
+	INIT_LIST_HEAD(&smp2p->outbound);
+
+	platform_set_drvdata(pdev, smp2p);
+
+	key = "qcom,smem";
+	ret = of_property_read_u32_array(pdev->dev.of_node, key,
+					 smp2p->smem_items, 2);
+	if (ret)
+		return ret;
+
+	key = "qcom,local-pid";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+	if (ret)
+		goto report_read_failure;
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+	if (ret)
+		goto report_read_failure;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+		return irq;
+	}
+
+	smp2p->mbox_client.dev = &pdev->dev;
+	smp2p->mbox_client.knows_txdone = true;
+	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+	if (IS_ERR(smp2p->mbox_chan)) {
+		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+			return PTR_ERR(smp2p->mbox_chan);
+
+		smp2p->mbox_chan = NULL;
+
+		ret = smp2p_parse_ipc(smp2p);
+		if (ret)
+			return ret;
+	}
+
+	ret = qcom_smp2p_alloc_outbound_item(smp2p);
+	if (ret < 0)
+		goto release_mbox;
+
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto unwind_interfaces;
+		}
+
+		entry->smp2p = smp2p;
+		spin_lock_init(&entry->lock);
+
+		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+		if (ret < 0)
+			goto unwind_interfaces;
+
+		if (of_property_read_bool(node, "interrupt-controller")) {
+			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+			if (ret < 0)
+				goto unwind_interfaces;
+
+			list_add(&entry->node, &smp2p->inbound);
+		} else {
+			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+			if (ret < 0)
+				goto unwind_interfaces;
+
+			list_add(&entry->node, &smp2p->outbound);
+		}
+	}
+
+	/* Kick the outgoing edge after allocating entries */
+	qcom_smp2p_kick(smp2p);
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq,
+					NULL, qcom_smp2p_intr,
+					IRQF_ONESHOT,
+					"smp2p", (void *)smp2p);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request interrupt\n");
+		goto unwind_interfaces;
+	}
+
+
+
+unwind_interfaces:
+	list_for_each_entry(entry, &smp2p->inbound, node)
+		irq_domain_remove(entry->domain);
+
+	list_for_each_entry(entry, &smp2p->outbound, node)
+		qcom_smem_state_unregister(entry->state);
+
+	smp2p->out->valid_entries = 0;
+
+release_mbox:
+	mbox_free_channel(smp2p->mbox_chan);
+
+	return ret;
+
+report_read_failure:
+	dev_err(&pdev->dev, "failed to read %s\n", key);
+	return -EINVAL;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+	struct smp2p_entry *entry;
+
+	list_for_each_entry(entry, &smp2p->inbound, node)
+		irq_domain_remove(entry->domain);
+
+	list_for_each_entry(entry, &smp2p->outbound, node)
+		qcom_smem_state_unregister(entry->state);
+
+	mbox_free_channel(smp2p->mbox_chan);
+
+	smp2p->out->valid_entries = 0;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+	{ .compatible = "qcom,smp2p" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+	.probe = qcom_smp2p_probe,
+	.remove = qcom_smp2p_remove,
+	.driver  = {
+		.name  = "qcom_smp2p",
+		.of_match_table = qcom_smp2p_of_match,
+	},
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
new file mode 100644
index 0000000..50214b6
--- /dev/null
+++ b/drivers/soc/qcom/smsm.c
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+/*
+ * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
+ * for communicating single bit state information to remote processors.
+ *
+ * The implementation is based on two sections of shared memory; the first
+ * holding the state bits and the second holding a matrix of subscription bits.
+ *
+ * The state bits are structured in entries of 32 bits, each belonging to one
+ * system in the SoC. The entry belonging to the local system is considered
+ * read-write, while the rest should be considered read-only.
+ *
+ * The subscription matrix consists of N bitmaps per entry, denoting interest
+ * in updates of the entry for each of the N hosts. Upon updating a state bit
+ * each host's subscription bitmap should be queried and the remote system
+ * should be interrupted if they request so.
+ *
+ * The subscription matrix is laid out in entry-major order:
+ * entry0: [host0 ... hostN]
+ *	.
+ *	.
+ * entryM: [host0 ... hostN]
+ *
+ * A third, optional, shared memory region might contain information regarding
+ * the number of entries in the state bitmap as well as number of columns in
+ * the subscription matrix.
+ */
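+
+/*
+ * Put differently, the subscription word that host H holds for entry E sits
+ * at intr_mask[E * num_hosts + H]; this is the indexing used further down
+ * when the local state and per-entry subscription pointers are set up.
+ */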
+
+/*
+ * Shared memory identifiers, used to acquire handles to respective memory
+ * region.
+ */
+#define SMEM_SMSM_SHARED_STATE		85
+#define SMEM_SMSM_CPU_INTR_MASK		333
+#define SMEM_SMSM_SIZE_INFO		419
+
+/*
+ * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
+ */
+#define SMSM_DEFAULT_NUM_ENTRIES	8
+#define SMSM_DEFAULT_NUM_HOSTS		3
+
+struct smsm_entry;
+struct smsm_host;
+
+/**
+ * struct qcom_smsm - smsm driver context
+ * @dev:	smsm device pointer
+ * @local_host:	column in the subscription matrix representing this system
+ * @num_hosts:	number of columns in the subscription matrix
+ * @num_entries: number of entries in the state map and rows in the subscription
+ *		matrix
+ * @local_state: pointer to the local processor's state bits
+ * @subscription: pointer to local processor's row in subscription matrix
+ * @state:	smem state handle
+ * @lock:	spinlock for read-modify-write of the outgoing state
+ * @entries:	context for each of the entries
+ * @hosts:	context for each of the hosts
+ */
+struct qcom_smsm {
+	struct device *dev;
+
+	u32 local_host;
+
+	u32 num_hosts;
+	u32 num_entries;
+
+	u32 *local_state;
+	u32 *subscription;
+	struct qcom_smem_state *state;
+
+	spinlock_t lock;
+
+	struct smsm_entry *entries;
+	struct smsm_host *hosts;
+};
+
+/**
+ * struct smsm_entry - per remote processor entry context
+ * @smsm:	back-reference to driver context
+ * @domain:	IRQ domain for this entry, if representing a remote system
+ * @irq_enabled: bitmap of which state bits IRQs are enabled
+ * @irq_rising:	bitmap tracking if rising bits should be propagated
+ * @irq_falling: bitmap tracking if falling bits should be propagated
+ * @last_value:	snapshot of state bits last time the interrupts were propagated
+ * @remote_state: pointer to this entry's state bits
+ * @subscription: pointer to a row in the subscription matrix representing this
+ *		entry
+ */
+struct smsm_entry {
+	struct qcom_smsm *smsm;
+
+	struct irq_domain *domain;
+	DECLARE_BITMAP(irq_enabled, 32);
+	DECLARE_BITMAP(irq_rising, 32);
+	DECLARE_BITMAP(irq_falling, 32);
+	u32 last_value;
+
+	u32 *remote_state;
+	u32 *subscription;
+};
+
+/**
+ * struct smsm_host - representation of a remote host
+ * @ipc_regmap:	regmap for outgoing interrupt
+ * @ipc_offset:	offset in @ipc_regmap for outgoing interrupt
+ * @ipc_bit:	bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ */
+struct smsm_host {
+	struct regmap *ipc_regmap;
+	int ipc_offset;
+	int ipc_bit;
+};
+
+/**
+ * smsm_update_bits() - change bit in outgoing entry and inform subscribers
+ * @data:	smsm context pointer
+ * @mask:	mask of the bits in the entry to update
+ * @value:	new value
+ *
+ * Used to set and clear the bits in the outgoing/local entry and inform
+ * subscribers about the change.
+ */
+static int smsm_update_bits(void *data, u32 mask, u32 value)
+{
+	struct qcom_smsm *smsm = data;
+	struct smsm_host *hostp;
+	unsigned long flags;
+	u32 changes;
+	u32 host;
+	u32 orig;
+	u32 val;
+
+	spin_lock_irqsave(&smsm->lock, flags);
+
+	/* Update the entry */
+	val = orig = readl(smsm->local_state);
+	val &= ~mask;
+	val |= value;
+
+	/* Don't signal if we didn't change the value */
+	changes = val ^ orig;
+	if (!changes) {
+		spin_unlock_irqrestore(&smsm->lock, flags);
+		goto done;
+	}
+
+	/* Write out the new value */
+	writel(val, smsm->local_state);
+	spin_unlock_irqrestore(&smsm->lock, flags);
+
+	/* Make sure the value update is ordered before any kicks */
+	wmb();
+
+	/* Iterate over all hosts to check which of them want a kick */
+	for (host = 0; host < smsm->num_hosts; host++) {
+		hostp = &smsm->hosts[host];
+
+		val = readl(smsm->subscription + host);
+		if (val & changes && hostp->ipc_regmap) {
+			regmap_write(hostp->ipc_regmap,
+				     hostp->ipc_offset,
+				     BIT(hostp->ipc_bit));
+		}
+	}
+
+done:
+	return 0;
+}
+
+static const struct qcom_smem_state_ops smsm_state_ops = {
+	.update_bits = smsm_update_bits,
+};
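+
+/*
+ * These ops back the qcom_smem_state handle registered in probe; local client
+ * drivers typically change the outgoing bits through the generic
+ * qcom_smem_state_update_bits() helper, which ends up in smsm_update_bits().
+ */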
+
+/**
+ * smsm_intr() - cascading IRQ handler for SMSM
+ * @irq:	unused
+ * @data:	entry related to this IRQ
+ *
+ * This function cascades an incoming interrupt from a remote system, based on
+ * the state bits and configuration.
+ */
+static irqreturn_t smsm_intr(int irq, void *data)
+{
+	struct smsm_entry *entry = data;
+	unsigned i;
+	int irq_pin;
+	u32 changed;
+	u32 val;
+
+	val = readl(entry->remote_state);
+	changed = val ^ entry->last_value;
+	entry->last_value = val;
+
+	for_each_set_bit(i, entry->irq_enabled, 32) {
+		if (!(changed & BIT(i)))
+			continue;
+
+		if (val & BIT(i)) {
+			if (test_bit(i, entry->irq_rising)) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		} else {
+			if (test_bit(i, entry->irq_falling)) {
+				irq_pin = irq_find_mapping(entry->domain, i);
+				handle_nested_irq(irq_pin);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
+ * @irqd:	IRQ handle to be masked
+ *
+ * This un-subscribes the local CPU from interrupts upon changes to the defined
+ * status bit. The bit is also cleared from cascading.
+ */
+static void smsm_mask_irq(struct irq_data *irqd)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qcom_smsm *smsm = entry->smsm;
+	u32 val;
+
+	if (entry->subscription) {
+		val = readl(entry->subscription + smsm->local_host);
+		val &= ~BIT(irq);
+		writel(val, entry->subscription + smsm->local_host);
+	}
+
+	clear_bit(irq, entry->irq_enabled);
+}
+
+/**
+ * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
+ * @irqd:	IRQ handle to be unmasked
+ *
+ * This subscribes the local CPU to interrupts upon changes to the defined
+ * status bit. The bit is also marked for cascading.
+ */
+static void smsm_unmask_irq(struct irq_data *irqd)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qcom_smsm *smsm = entry->smsm;
+	u32 val;
+
+	set_bit(irq, entry->irq_enabled);
+
+	if (entry->subscription) {
+		val = readl(entry->subscription + smsm->local_host);
+		val |= BIT(irq);
+		writel(val, entry->subscription + smsm->local_host);
+	}
+}
+
+/**
+ * smsm_set_irq_type() - updates the requested IRQ type for the cascading
+ * @irqd:	consumer interrupt handle
+ * @type:	requested flags
+ */
+static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+	if (!(type & IRQ_TYPE_EDGE_BOTH))
+		return -EINVAL;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		set_bit(irq, entry->irq_rising);
+	else
+		clear_bit(irq, entry->irq_rising);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		set_bit(irq, entry->irq_falling);
+	else
+		clear_bit(irq, entry->irq_falling);
+
+	return 0;
+}
+
+static struct irq_chip smsm_irq_chip = {
+	.name           = "smsm",
+	.irq_mask       = smsm_mask_irq,
+	.irq_unmask     = smsm_unmask_irq,
+	.irq_set_type	= smsm_set_irq_type,
+};
+
+/**
+ * smsm_irq_map() - sets up a mapping for a cascaded IRQ
+ * @d:		IRQ domain representing an entry
+ * @irq:	IRQ to set up
+ * @hw:		unused
+ */
+static int smsm_irq_map(struct irq_domain *d,
+			unsigned int irq,
+			irq_hw_number_t hw)
+{
+	struct smsm_entry *entry = d->host_data;
+
+	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, entry);
+	irq_set_nested_thread(irq, 1);
+
+	return 0;
+}
+
+static const struct irq_domain_ops smsm_irq_ops = {
+	.map = smsm_irq_map,
+	.xlate = irq_domain_xlate_twocell,
+};
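+
+/*
+ * Consumers reference an entry's interrupt-controller using two cells
+ * (irq_domain_xlate_twocell): the state bit number and the usual IRQ trigger
+ * flags, which smsm_set_irq_type() folds into the rising/falling bitmaps.
+ */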
+
+/**
+ * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
+ * @smsm:	smsm driver context
+ * @host_id:	index of the remote host to be resolved
+ *
+ * Parses device tree to acquire the information needed for sending the
+ * outgoing interrupts to a remote host - identified by @host_id.
+ */
+static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
+{
+	struct device_node *syscon;
+	struct device_node *node = smsm->dev->of_node;
+	struct smsm_host *host = &smsm->hosts[host_id];
+	char key[16];
+	int ret;
+
+	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
+	syscon = of_parse_phandle(node, key, 0);
+	if (!syscon)
+		return 0;
+
+	host->ipc_regmap = syscon_node_to_regmap(syscon);
+	if (IS_ERR(host->ipc_regmap))
+		return PTR_ERR(host->ipc_regmap);
+
+	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
+	if (ret < 0) {
+		dev_err(smsm->dev, "no offset in %s\n", key);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
+	if (ret < 0) {
+		dev_err(smsm->dev, "no bit in %s\n", key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
+ * @smsm:	smsm driver context
+ * @entry:	entry context to be set up
+ * @node:	dt node containing the entry's properties
+ */
+static int smsm_inbound_entry(struct qcom_smsm *smsm,
+			      struct smsm_entry *entry,
+			      struct device_node *node)
+{
+	int ret;
+	int irq;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_threaded_irq(smsm->dev, irq,
+					NULL, smsm_intr,
+					IRQF_ONESHOT,
+					"smsm", (void *)entry);
+	if (ret) {
+		dev_err(smsm->dev, "failed to request interrupt\n");
+		return ret;
+	}
+
+	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+	if (!entry->domain) {
+		dev_err(smsm->dev, "failed to add irq_domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * smsm_get_size_info() - parse the optional memory segment for sizes
+ * @smsm:	smsm driver context
+ *
+ * Attempt to acquire the number of hosts and entries from the optional shared
+ * memory location. Not being able to find this segment should indicate that
+ * we're on an older system where these values were hard coded to
+ * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int smsm_get_size_info(struct qcom_smsm *smsm)
+{
+	size_t size;
+	struct {
+		u32 num_hosts;
+		u32 num_entries;
+		u32 reserved0;
+		u32 reserved1;
+	} *info;
+
+	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
+	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
+		if (PTR_ERR(info) != -EPROBE_DEFER)
+			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
+		return PTR_ERR(info);
+	} else if (IS_ERR(info) || size != sizeof(*info)) {
+		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
+		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
+		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
+		return 0;
+	}
+
+	smsm->num_entries = info->num_entries;
+	smsm->num_hosts = info->num_hosts;
+
+	dev_dbg(smsm->dev,
+		"found custom size of smsm: %d entries %d hosts\n",
+		smsm->num_entries, smsm->num_hosts);
+
+	return 0;
+}
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+	struct device_node *local_node;
+	struct device_node *node;
+	struct smsm_entry *entry;
+	struct qcom_smsm *smsm;
+	u32 *intr_mask;
+	size_t size;
+	u32 *states;
+	u32 id;
+	int ret;
+
+	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+	if (!smsm)
+		return -ENOMEM;
+	smsm->dev = &pdev->dev;
+	spin_lock_init(&smsm->lock);
+
+	ret = smsm_get_size_info(smsm);
+	if (ret)
+		return ret;
+
+	smsm->entries = devm_kcalloc(&pdev->dev,
+				     smsm->num_entries,
+				     sizeof(struct smsm_entry),
+				     GFP_KERNEL);
+	if (!smsm->entries)
+		return -ENOMEM;
+
+	smsm->hosts = devm_kcalloc(&pdev->dev,
+				   smsm->num_hosts,
+				   sizeof(struct smsm_host),
+				   GFP_KERNEL);
+	if (!smsm->hosts)
+		return -ENOMEM;
+
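+	/*
+	 * The child node that carries #qcom,smem-state-cells describes the
+	 * local (outgoing) entry; it is used below to register the
+	 * qcom_smem_state handle for this processor.
+	 */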
+	for_each_child_of_node(pdev->dev.of_node, local_node) {
+		if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+			break;
+	}
+	if (!local_node) {
+		dev_err(&pdev->dev, "no state entry\n");
+		return -EINVAL;
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+			     "qcom,local-host",
+			     &smsm->local_host);
+
+	/* Parse the host properties */
+	for (id = 0; id < smsm->num_hosts; id++) {
+		ret = smsm_parse_ipc(smsm, id);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Acquire the main SMSM state vector */
+	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
+			      smsm->num_entries * sizeof(u32));
+	if (ret < 0 && ret != -EEXIST) {
+		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+		return ret;
+	}
+
+	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
+	if (IS_ERR(states)) {
+		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+		return PTR_ERR(states);
+	}
+
+	/* Acquire the list of interrupt mask vectors */
+	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
+	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
+	if (ret < 0 && ret != -EEXIST) {
+		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
+		return ret;
+	}
+
+	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
+	if (IS_ERR(intr_mask)) {
+		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
+		return PTR_ERR(intr_mask);
+	}
+
+	/* Setup the reference to the local state bits */
+	smsm->local_state = states + smsm->local_host;
+	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;
+
+	/* Register the outgoing state */
+	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
+	if (IS_ERR(smsm->state)) {
+		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
+		return PTR_ERR(smsm->state);
+	}
+
+	/* Register handlers for remote processor entries of interest. */
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		if (!of_property_read_bool(node, "interrupt-controller"))
+			continue;
+
+		ret = of_property_read_u32(node, "reg", &id);
+		if (ret || id >= smsm->num_entries) {
+			dev_err(&pdev->dev, "invalid reg of entry\n");
+			if (!ret)
+				ret = -EINVAL;
+			goto unwind_interfaces;
+		}
+		entry = &smsm->entries[id];
+
+		entry->smsm = smsm;
+		entry->remote_state = states + id;
+
+		/* Setup subscription pointers and unsubscribe to any kicks */
+		entry->subscription = intr_mask + id * smsm->num_hosts;
+		writel(0, entry->subscription + smsm->local_host);
+
+		ret = smsm_inbound_entry(smsm, entry, node);
+		if (ret < 0)
+			goto unwind_interfaces;
+	}
+
+	platform_set_drvdata(pdev, smsm);
+
+	return 0;
+
+unwind_interfaces:
+	for (id = 0; id < smsm->num_entries; id++)
+		if (smsm->entries[id].domain)
+			irq_domain_remove(smsm->entries[id].domain);
+
+	qcom_smem_state_unregister(smsm->state);
+
+	return ret;
+}
+
+static int qcom_smsm_remove(struct platform_device *pdev)
+{
+	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
+	unsigned id;
+
+	for (id = 0; id < smsm->num_entries; id++)
+		if (smsm->entries[id].domain)
+			irq_domain_remove(smsm->entries[id].domain);
+
+	qcom_smem_state_unregister(smsm->state);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+	{ .compatible = "qcom,smsm" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+	.probe = qcom_smsm_probe,
+	.remove = qcom_smsm_remove,
+	.driver  = {
+		.name  = "qcom-smsm",
+		.of_match_table = qcom_smsm_of_match,
+	},
+};
+module_platform_driver(qcom_smsm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 0000000..f9d7a85
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#define MAX_PMIC_DATA		2
+#define MAX_SEQ_DATA		64
+#define SPM_CTL_INDEX		0x7f
+#define SPM_CTL_INDEX_SHIFT	4
+#define SPM_CTL_EN		BIT(0)
+
+enum pm_sleep_mode {
+	PM_SLEEP_MODE_STBY,
+	PM_SLEEP_MODE_RET,
+	PM_SLEEP_MODE_SPC,
+	PM_SLEEP_MODE_PC,
+	PM_SLEEP_MODE_NR,
+};
+
+enum spm_reg {
+	SPM_REG_CFG,
+	SPM_REG_SPM_CTL,
+	SPM_REG_DLY,
+	SPM_REG_PMIC_DLY,
+	SPM_REG_PMIC_DATA_0,
+	SPM_REG_PMIC_DATA_1,
+	SPM_REG_VCTL,
+	SPM_REG_SEQ_ENTRY,
+	SPM_REG_SPM_STS,
+	SPM_REG_PMIC_STS,
+	SPM_REG_NR,
+};
+
+struct spm_reg_data {
+	const u8 *reg_offset;
+	u32 spm_cfg;
+	u32 spm_dly;
+	u32 pmic_dly;
+	u32 pmic_data[MAX_PMIC_DATA];
+	u8 seq[MAX_SEQ_DATA];
+	u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+	void __iomem *reg_base;
+	const struct spm_reg_data *reg_data;
+};
+
+static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
+	[SPM_REG_CFG]		= 0x08,
+	[SPM_REG_SPM_CTL]	= 0x30,
+	[SPM_REG_DLY]		= 0x34,
+	[SPM_REG_SEQ_ENTRY]	= 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu  = {
+	.reg_offset = spm_reg_offset_v2_1,
+	.spm_cfg = 0x1,
+	.spm_dly = 0x3C102800,
+	.seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+		0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+		0x0F },
+	.start_index[PM_SLEEP_MODE_STBY] = 0,
+	.start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
+	[SPM_REG_CFG]		= 0x08,
+	[SPM_REG_SPM_CTL]	= 0x20,
+	[SPM_REG_PMIC_DLY]	= 0x24,
+	[SPM_REG_PMIC_DATA_0]	= 0x28,
+	[SPM_REG_PMIC_DATA_1]	= 0x2C,
+	[SPM_REG_SEQ_ENTRY]	= 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+	.reg_offset = spm_reg_offset_v1_1,
+	.spm_cfg = 0x1F,
+	.pmic_dly = 0x02020004,
+	.pmic_data[0] = 0x0084009C,
+	.pmic_data[1] = 0x00A4001C,
+	.seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+		0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+	.start_index[PM_SLEEP_MODE_STBY] = 0,
+	.start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
+
+typedef int (*idle_fn)(void);
+static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+					enum spm_reg reg, u32 val)
+{
+	if (drv->reg_data->reg_offset[reg])
+		writel_relaxed(val, drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+}
+
+/* Write the register and read it back until the value sticks, before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+					enum spm_reg reg, u32 val)
+{
+	u32 ret;
+
+	if (!drv->reg_data->reg_offset[reg])
+		return;
+
+	do {
+		writel_relaxed(val, drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+		ret = readl_relaxed(drv->reg_base +
+				drv->reg_data->reg_offset[reg]);
+		if (ret == val)
+			break;
+		cpu_relax();
+	} while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+					enum spm_reg reg)
+{
+	return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+static void spm_set_low_power_mode(struct spm_driver_data *drv,
+					enum pm_sleep_mode mode)
+{
+	u32 start_index;
+	u32 ctl_val;
+
+	start_index = drv->reg_data->start_index[mode];
+
+	ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+	ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+	ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+	ctl_val |= SPM_CTL_EN;
+	spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
+
+static int qcom_pm_collapse(unsigned long int unused)
+{
+	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
+
+	/*
+	 * Returns here only if there was a pending interrupt and we did not
+	 * power down as a result.
+	 */
+	return -1;
+}
+
+static int qcom_cpu_spc(void)
+{
+	int ret;
+	struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
+
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
+	ret = cpu_suspend(0, qcom_pm_collapse);
+	/*
+	 * ARM common code executes WFI without calling into our driver and
+	 * if the SPM mode is not reset, then we may accidentally power down the
+	 * cpu when we intended only to gate the cpu clock.
+	 * Ensure the state is set to standby before returning.
+	 */
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+	return ret;
+}
+
+static int qcom_idle_enter(unsigned long index)
+{
+	return __this_cpu_read(qcom_idle_ops)[index]();
+}
+
+static const struct of_device_id qcom_idle_state_match[] __initconst = {
+	{ .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+	{ },
+};
+
+static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+{
+	const struct of_device_id *match_id;
+	struct device_node *state_node;
+	int i;
+	int state_count = 1;
+	idle_fn idle_fns[CPUIDLE_STATE_MAX];
+	idle_fn *fns;
+	cpumask_t mask;
+	bool use_scm_power_down = false;
+
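+	/*
+	 * State 0 is the plain WFI state handled by the generic ARM cpuidle
+	 * code, so the per-cpu idle_fns table is filled starting at index 1,
+	 * matching the index later passed to qcom_idle_enter().
+	 */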
+	for (i = 0; ; i++) {
+		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+		if (!state_node)
+			break;
+
+		if (!of_device_is_available(state_node))
+			continue;
+
+		if (i == CPUIDLE_STATE_MAX) {
+			pr_warn("%s: cpuidle states reached max possible\n",
+					__func__);
+			break;
+		}
+
+		match_id = of_match_node(qcom_idle_state_match, state_node);
+		if (!match_id)
+			return -ENODEV;
+
+		idle_fns[state_count] = match_id->data;
+
+		/* Check if any of the states allow power down */
+		if (match_id->data == qcom_cpu_spc)
+			use_scm_power_down = true;
+
+		state_count++;
+	}
+
+	if (state_count == 1)
+		goto check_spm;
+
+	fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
+			GFP_KERNEL);
+	if (!fns)
+		return -ENOMEM;
+
+	for (i = 1; i < state_count; i++)
+		fns[i] = idle_fns[i];
+
+	if (use_scm_power_down) {
+		/* We have at least one power-down mode */
+		cpumask_clear(&mask);
+		cpumask_set_cpu(cpu, &mask);
+		qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
+	}
+
+	per_cpu(qcom_idle_ops, cpu) = fns;
+
+	/*
+	 * SPM probe for the cpu should have happened by now, if the
+	 * SPM device does not exist, return -ENXIO to indicate that the
+	 * cpu does not support idle states.
+	 */
+check_spm:
+	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+}
+
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
+	.suspend = qcom_idle_enter,
+	.init = qcom_cpuidle_init,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+
+static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
+		int *spm_cpu)
+{
+	struct spm_driver_data *drv = NULL;
+	struct device_node *cpu_node, *saw_node;
+	int cpu;
+	bool found = false;
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_cpu_device_node_get(cpu);
+		if (!cpu_node)
+			continue;
+		saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+		found = (saw_node == pdev->dev.of_node);
+		of_node_put(saw_node);
+		of_node_put(cpu_node);
+		if (found)
+			break;
+	}
+
+	if (found) {
+		drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+		if (drv)
+			*spm_cpu = cpu;
+	}
+
+	return drv;
+}
+
+static const struct of_device_id spm_match_table[] = {
+	{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+	  .data = &spm_reg_8974_8084_cpu },
+	{ .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+	  .data = &spm_reg_8974_8084_cpu },
+	{ .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+	  .data = &spm_reg_8064_cpu },
+	{ },
+};
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+	struct spm_driver_data *drv;
+	struct resource *res;
+	const struct of_device_id *match_id;
+	void __iomem *addr;
+	int cpu;
+
+	drv = spm_get_drv(pdev, &cpu);
+	if (!drv)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return PTR_ERR(drv->reg_base);
+
+	match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+	if (!match_id)
+		return -ENODEV;
+
+	drv->reg_data = match_id->data;
+
+	/* Write the SPM sequences first.. */
+	addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+	__iowrite32_copy(addr, drv->reg_data->seq,
+			ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+	/*
+	 * ..and then the control registers.
+	 * On some SoC if the control registers are written first and if the
+	 * CPU was held in reset, the reset signal could trigger the SPM state
+	 * machine, before the sequences are completely written.
+	 */
+	spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+	spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+	spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+	spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+				drv->reg_data->pmic_data[0]);
+	spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+				drv->reg_data->pmic_data[1]);
+
+	/* Set up Standby as the default low power mode */
+	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+	per_cpu(cpu_spm_drv, cpu) = drv;
+
+	return 0;
+}
+
+static struct platform_driver spm_driver = {
+	.probe = spm_dev_probe,
+	.driver = {
+		.name = "saw",
+		.of_match_table = spm_match_table,
+	},
+};
+
+builtin_platform_driver(spm_driver);
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 0000000..feb0cb4
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+	TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
+
+	TP_ARGS(d, m, r, e),
+
+	TP_STRUCT__entry(
+			 __string(name, d->name)
+			 __field(int, m)
+			 __field(u32, addr)
+			 __field(u32, data)
+			 __field(int, err)
+	),
+
+	TP_fast_assign(
+		       __assign_str(name, d->name);
+		       __entry->m = m;
+		       __entry->addr = r->cmds[0].addr;
+		       __entry->data = r->cmds[0].data;
+		       __entry->err = e;
+	),
+
+	TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
+		  __get_str(name), __entry->m, __entry->addr, __entry->data,
+		  __entry->err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+	TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
+		 const struct tcs_cmd *c),
+
+	TP_ARGS(d, m, n, h, c),
+
+	TP_STRUCT__entry(
+			 __string(name, d->name)
+			 __field(int, m)
+			 __field(int, n)
+			 __field(u32, hdr)
+			 __field(u32, addr)
+			 __field(u32, data)
+			 __field(bool, wait)
+	),
+
+	TP_fast_assign(
+		       __assign_str(name, d->name);
+		       __entry->m = m;
+		       __entry->n = n;
+		       __entry->hdr = h;
+		       __entry->addr = c->addr;
+		       __entry->data = c->data;
+		       __entry->wait = c->wait;
+	),
+
+	TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+		  __get_str(name), __entry->m, __entry->n, __entry->hdr,
+		  __entry->addr, __entry->data, __entry->wait)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
new file mode 100644
index 0000000..df3ccb3
--- /dev/null
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+
+#define WCNSS_REQUEST_TIMEOUT	(5 * HZ)
+#define WCNSS_CBC_TIMEOUT	(10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING	1
+#define WCNSS_ACK_COLD_BOOTING	2
+
+#define NV_FRAGMENT_SIZE	3072
+#define NVBIN_FILE		"wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/**
+ * struct wcnss_ctrl - driver context
+ * @dev:	device handle
+ * @channel:	rpmsg endpoint of the WCNSS_CTRL channel
+ * @ack:	completion for outstanding requests
+ * @cbc:	completion for cbc complete indication
+ * @ack_status:	status of the outstanding request
+ * @probe_work: worker for uploading nv binary
+ */
+struct wcnss_ctrl {
+	struct device *dev;
+	struct rpmsg_endpoint *channel;
+
+	struct completion ack;
+	struct completion cbc;
+	int ack_status;
+
+	struct work_struct probe_work;
+};
+
+/* message types */
+enum {
+	WCNSS_VERSION_REQ = 0x01000000,
+	WCNSS_VERSION_RESP,
+	WCNSS_DOWNLOAD_NV_REQ,
+	WCNSS_DOWNLOAD_NV_RESP,
+	WCNSS_UPLOAD_CAL_REQ,
+	WCNSS_UPLOAD_CAL_RESP,
+	WCNSS_DOWNLOAD_CAL_REQ,
+	WCNSS_DOWNLOAD_CAL_RESP,
+	WCNSS_VBAT_LEVEL_IND,
+	WCNSS_BUILD_VERSION_REQ,
+	WCNSS_BUILD_VERSION_RESP,
+	WCNSS_PM_CONFIG_REQ,
+	WCNSS_CBC_COMPLETE_IND,
+};
+
+/**
+ * struct wcnss_msg_hdr - common packet header for requests and responses
+ * @type:	packet message type
+ * @len:	total length of the packet, including this header
+ */
+struct wcnss_msg_hdr {
+	u32 type;
+	u32 len;
+} __packed;
+
+/**
+ * struct wcnss_version_resp - version request response
+ * @hdr:	common packet wcnss_msg_hdr header
+ * @major:	major version number of the WCNSS firmware
+ * @minor:	minor version number of the WCNSS firmware
+ * @version:	version number of the WCNSS firmware
+ * @revision:	revision number of the WCNSS firmware
+ */
+struct wcnss_version_resp {
+	struct wcnss_msg_hdr hdr;
+	u8 major;
+	u8 minor;
+	u8 version;
+	u8 revision;
+} __packed;
+
+/**
+ * struct wcnss_download_nv_req - firmware fragment request
+ * @hdr:	common packet wcnss_msg_hdr header
+ * @seq:	sequence number of this fragment
+ * @last:	boolean indicator of this being the last fragment of the binary
+ * @frag_size:	length of this fragment
+ * @fragment:	fragment data
+ */
+struct wcnss_download_nv_req {
+	struct wcnss_msg_hdr hdr;
+	u16 seq;
+	u16 last;
+	u32 frag_size;
+	u8 fragment[];
+} __packed;
+
+/**
+ * struct wcnss_download_nv_resp - firmware download response
+ * @hdr:	common packet wcnss_msg_hdr header
+ * @status:	boolean to indicate success of the download
+ */
+struct wcnss_download_nv_resp {
+	struct wcnss_msg_hdr hdr;
+	u8 status;
+} __packed;
+
+/**
+ * wcnss_ctrl_smd_callback() - handler for incoming WCNSS_CTRL messages
+ * @rpdev:	rpmsg device for the WCNSS_CTRL channel
+ * @data:	pointer to the incoming data packet
+ * @count:	size of the incoming data packet
+ * @priv:	unused
+ * @addr:	unused
+ *
+ * Handles any incoming packets from the remote WCNSS_CTRL service.
+ */
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+				   void *data,
+				   int count,
+				   void *priv,
+				   u32 addr)
+{
+	struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+	const struct wcnss_download_nv_resp *nvresp;
+	const struct wcnss_version_resp *version;
+	const struct wcnss_msg_hdr *hdr = data;
+
+	switch (hdr->type) {
+	case WCNSS_VERSION_RESP:
+		if (count != sizeof(*version)) {
+			dev_err(wcnss->dev,
+				"invalid size of version response\n");
+			break;
+		}
+
+		version = data;
+		dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n",
+			 version->major, version->minor,
+			 version->version, version->revision);
+
+		complete(&wcnss->ack);
+		break;
+	case WCNSS_DOWNLOAD_NV_RESP:
+		if (count != sizeof(*nvresp)) {
+			dev_err(wcnss->dev,
+				"invalid size of download response\n");
+			break;
+		}
+
+		nvresp = data;
+		wcnss->ack_status = nvresp->status;
+		complete(&wcnss->ack);
+		break;
+	case WCNSS_CBC_COMPLETE_IND:
+		dev_dbg(wcnss->dev, "cold boot complete\n");
+		complete(&wcnss->cbc);
+		break;
+	default:
+		dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * wcnss_request_version() - send a version request to WCNSS
+ * @wcnss:	wcnss ctrl driver context
+ */
+static int wcnss_request_version(struct wcnss_ctrl *wcnss)
+{
+	struct wcnss_msg_hdr msg;
+	int ret;
+
+	msg.type = WCNSS_VERSION_REQ;
+	msg.len = sizeof(msg);
+	ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+	if (!ret) {
+		dev_err(wcnss->dev, "timeout waiting for version response\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/**
+ * wcnss_download_nv() - send nv binary to WCNSS
+ * @wcnss:	wcnss_ctrl state handle
+ * @expect_cbc:	indicator to the caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
+ */
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+{
+	struct wcnss_download_nv_req *req;
+	const struct firmware *fw;
+	const void *data;
+	ssize_t left;
+	int ret;
+
+	req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
+	if (ret < 0) {
+		dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
+			NVBIN_FILE, ret);
+		goto free_req;
+	}
+
+	data = fw->data;
+	left = fw->size;
+
+	req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
+	req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
+
+	req->last = 0;
+	req->frag_size = NV_FRAGMENT_SIZE;
+
+	req->seq = 0;
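+	/*
+	 * Stream the firmware in NV_FRAGMENT_SIZE chunks; the final fragment
+	 * is flagged in ->last and carries only the remaining bytes.
+	 */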
+	do {
+		if (left <= NV_FRAGMENT_SIZE) {
+			req->last = 1;
+			req->frag_size = left;
+			req->hdr.len = sizeof(*req) + left;
+		}
+
+		memcpy(req->fragment, data, req->frag_size);
+
+		ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
+		if (ret < 0) {
+			dev_err(wcnss->dev, "failed to send smd packet\n");
+			goto release_fw;
+		}
+
+		/* Increment for next fragment */
+		req->seq++;
+
+		data += NV_FRAGMENT_SIZE;
+		left -= NV_FRAGMENT_SIZE;
+	} while (left > 0);
+
+	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
+	if (!ret) {
+		dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
+		ret = -ETIMEDOUT;
+	} else {
+		*expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+		ret = 0;
+	}
+
+release_fw:
+	release_firmware(fw);
+free_req:
+	kfree(req);
+
+	return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss:	wcnss handle, retrieved from drvdata
+ * @name:	SMD channel name
+ * @cb:		callback to handle incoming data on the channel
+ * @priv:	private data passed back to @cb
+ */
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
+{
+	struct rpmsg_channel_info chinfo;
+	struct wcnss_ctrl *_wcnss = wcnss;
+
+	strncpy(chinfo.name, name, sizeof(chinfo.name));
+	chinfo.src = RPMSG_ADDR_ANY;
+	chinfo.dst = RPMSG_ADDR_ANY;
+
+	return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+	struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+	bool expect_cbc;
+	int ret;
+
+	ret = wcnss_request_version(wcnss);
+	if (ret < 0)
+		return;
+
+	ret = wcnss_download_nv(wcnss, &expect_cbc);
+	if (ret < 0)
+		return;
+
+	/* Wait for pending cold boot completion if indicated by the nv downloader */
+	if (expect_cbc) {
+		ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+		if (!ret)
+			dev_err(wcnss->dev, "expected cold boot completion\n");
+	}
+
+	of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
+}
+
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
+{
+	struct wcnss_ctrl *wcnss;
+
+	wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
+	if (!wcnss)
+		return -ENOMEM;
+
+	wcnss->dev = &rpdev->dev;
+	wcnss->channel = rpdev->ept;
+
+	init_completion(&wcnss->ack);
+	init_completion(&wcnss->cbc);
+	INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
+
+	dev_set_drvdata(&rpdev->dev, wcnss);
+
+	schedule_work(&wcnss->probe_work);
+
+	return 0;
+}
+
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
+{
+	struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+
+	cancel_work_sync(&wcnss->probe_work);
+	of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+	{ .compatible = "qcom,wcnss", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match);
+
+static struct rpmsg_driver wcnss_ctrl_driver = {
+	.probe = wcnss_ctrl_probe,
+	.remove = wcnss_ctrl_remove,
+	.callback = wcnss_ctrl_smd_callback,
+	.drv  = {
+		.name  = "qcom_wcnss_ctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = wcnss_ctrl_of_match,
+	},
+};
+
+module_rpmsg_driver(wcnss_ctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
+MODULE_LICENSE("GPL v2");