v4.19.13 snapshot.
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 0000000..9d73ad8
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SLIMbus driver configuration
+#
+menuconfig SLIMBUS
+	tristate "SLIMbus support"
+	help
+	  SLIMbus is a standard interface between a System-on-Chip and audio
+	  codecs or other peripheral components in typical embedded systems.
+
+	  If unsure, choose N.
+
+if SLIMBUS
+
+# SLIMbus controllers
+config SLIM_QCOM_CTRL
+	tristate "Qualcomm SLIMbus Manager Component"
+	depends on HAS_IOMEM
+	help
+	  Select this driver if Qualcomm's SLIMbus Manager Component is
+	  programmed using the Linux kernel.
+
+config SLIM_QCOM_NGD_CTRL
+	tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
+	depends on QCOM_QMI_HELPERS
+	depends on HAS_IOMEM && DMA_ENGINE
+	help
+	  Select this driver if Qualcomm's SLIMbus Satellite Non-Generic
+	  Device Component is programmed using the Linux kernel.
+	  This is a lightweight SLIMbus controller driver responsible for
+	  communicating with slave HW directly over the bus using the
+	  messaging interface, and for communicating with the master component
+	  residing on the ADSP for bandwidth and data-channel management.
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 0000000..d9aa011
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for kernel SLIMbus framework.
+#
+obj-$(CONFIG_SLIMBUS)			+= slimbus.o
+slimbus-y				:= core.o messaging.o sched.o stream.o
+
+# Controllers
+obj-$(CONFIG_SLIM_QCOM_CTRL)		+= slim-qcom-ctrl.o
+slim-qcom-ctrl-y			:= qcom-ctrl.o
+
+obj-$(CONFIG_SLIM_QCOM_NGD_CTRL)	+= slim-qcom-ngd-ctrl.o
+slim-qcom-ngd-ctrl-y			:= qcom-ngd-ctrl.o
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
new file mode 100644
index 0000000..95b00d2
--- /dev/null
+++ b/drivers/slimbus/core.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus.h>
+#include "slimbus.h"
+
+static DEFINE_IDA(ctrl_ida);
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+					       const struct slim_device *sbdev)
+{
+	while (id->manf_id != 0 || id->prod_code != 0) {
+		if (id->manf_id == sbdev->e_addr.manf_id &&
+		    id->prod_code == sbdev->e_addr.prod_code)
+			return id;
+		id++;
+	}
+	return NULL;
+}
+
+static int slim_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_driver *sbdrv = to_slim_driver(drv);
+
+	return !!slim_match(sbdrv->id_table, sbdev);
+}
+
+static int slim_device_probe(struct device *dev)
+{
+	struct slim_device	*sbdev = to_slim_device(dev);
+	struct slim_driver	*sbdrv = to_slim_driver(dev->driver);
+
+	return sbdrv->probe(sbdev);
+}
+
+static int slim_device_remove(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+	struct slim_driver *sbdrv;
+
+	if (dev->driver) {
+		sbdrv = to_slim_driver(dev->driver);
+		if (sbdrv->remove)
+			sbdrv->remove(sbdev);
+	}
+
+	return 0;
+}
+
+struct bus_type slimbus_bus = {
+	.name		= "slimbus",
+	.match		= slim_device_match,
+	.probe		= slim_device_probe,
+	.remove		= slim_device_remove,
+};
+EXPORT_SYMBOL_GPL(slimbus_bus);
+
+/**
+ * __slim_driver_register() - Client driver registration with SLIMbus
+ *
+ * @drv: Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the SLIMbus framework.
+ * It is called from the driver's module-init function.
+ */
+int __slim_driver_register(struct slim_driver *drv, struct module *owner)
+{
+	/* ID table and probe are mandatory */
+	if (!drv->id_table || !drv->probe)
+		return -EINVAL;
+
+	drv->driver.bus = &slimbus_bus;
+	drv->driver.owner = owner;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__slim_driver_register);
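+/*
+ * Illustrative client-driver sketch (all names and IDs below are made up;
+ * this assumes the slim_driver_register()/slim_driver_unregister() wrappers
+ * declared in <linux/slimbus.h>):
+ *
+ *	static const struct slim_device_id my_codec_ids[] = {
+ *		{ 0x217, 0x1a0, 0, 0 },
+ *		{ }
+ *	};
+ *
+ *	static int my_codec_probe(struct slim_device *sbdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct slim_driver my_codec_driver = {
+ *		.driver		= { .name = "my-codec" },
+ *		.probe		= my_codec_probe,
+ *		.id_table	= my_codec_ids,
+ *	};
+ *
+ * The driver is then registered from module init with
+ * slim_driver_register(&my_codec_driver) and removed with
+ * slim_driver_unregister(&my_codec_driver).
+ */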
+
+/**
+ * slim_driver_unregister() - Undo effect of slim_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_unregister);
+
+static void slim_dev_release(struct device *dev)
+{
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	kfree(sbdev);
+}
+
+static int slim_add_device(struct slim_controller *ctrl,
+			   struct slim_device *sbdev,
+			   struct device_node *node)
+{
+	sbdev->dev.bus = &slimbus_bus;
+	sbdev->dev.parent = ctrl->dev;
+	sbdev->dev.release = slim_dev_release;
+	sbdev->dev.driver = NULL;
+	sbdev->ctrl = ctrl;
+	INIT_LIST_HEAD(&sbdev->stream_list);
+	spin_lock_init(&sbdev->stream_list_lock);
+
+	if (node)
+		sbdev->dev.of_node = of_node_get(node);
+
+	dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
+				  sbdev->e_addr.manf_id,
+				  sbdev->e_addr.prod_code,
+				  sbdev->e_addr.dev_index,
+				  sbdev->e_addr.instance);
+
+	return device_register(&sbdev->dev);
+}
+
+static struct slim_device *slim_alloc_device(struct slim_controller *ctrl,
+					     struct slim_eaddr *eaddr,
+					     struct device_node *node)
+{
+	struct slim_device *sbdev;
+	int ret;
+
+	sbdev = kzalloc(sizeof(*sbdev), GFP_KERNEL);
+	if (!sbdev)
+		return NULL;
+
+	sbdev->e_addr = *eaddr;
+	ret = slim_add_device(ctrl, sbdev, node);
+	if (ret) {
+		put_device(&sbdev->dev);
+		return NULL;
+	}
+
+	return sbdev;
+}
+
+static void of_register_slim_devices(struct slim_controller *ctrl)
+{
+	struct device *dev = ctrl->dev;
+	struct device_node *node;
+
+	if (!ctrl->dev->of_node)
+		return;
+
+	for_each_child_of_node(ctrl->dev->of_node, node) {
+		struct slim_device *sbdev;
+		struct slim_eaddr e_addr;
+		const char *compat = NULL;
+		int reg[2], ret;
+		int manf_id, prod_code;
+
+		compat = of_get_property(node, "compatible", NULL);
+		if (!compat)
+			continue;
+
+		ret = sscanf(compat, "slim%x,%x", &manf_id, &prod_code);
+		if (ret != 2) {
+			dev_err(dev, "Manf ID & Product code not found %s\n",
+				compat);
+			continue;
+		}
+
+		ret = of_property_read_u32_array(node, "reg", reg, 2);
+		if (ret) {
+			dev_err(dev, "Device and Instance id not found:%d\n",
+				ret);
+			continue;
+		}
+
+		e_addr.dev_index = reg[0];
+		e_addr.instance = reg[1];
+		e_addr.manf_id = manf_id;
+		e_addr.prod_code = prod_code;
+
+		sbdev = slim_alloc_device(ctrl, &e_addr, node);
+		if (!sbdev)
+			continue;
+	}
+}
+
+/**
+ * slim_register_controller() - Controller bring-up and registration.
+ *
+ * @ctrl: Controller to be registered.
+ *
+ * A controller is registered with the framework using this API.
+ * If devices on a controller were registered before the controller,
+ * this will make sure that they get probed when the controller is up.
+ */
+int slim_register_controller(struct slim_controller *ctrl)
+{
+	int id;
+
+	id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	ctrl->id = id;
+
+	if (!ctrl->min_cg)
+		ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+	if (!ctrl->max_cg)
+		ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+
+	ida_init(&ctrl->laddr_ida);
+	idr_init(&ctrl->tid_idr);
+	mutex_init(&ctrl->lock);
+	mutex_init(&ctrl->sched.m_reconf);
+	init_completion(&ctrl->sched.pause_comp);
+
+	dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
+		ctrl->name, ctrl->dev);
+
+	of_register_slim_devices(ctrl);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_register_controller);
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+static void slim_remove_device(struct slim_device *sbdev)
+{
+	device_unregister(&sbdev->dev);
+}
+
+static int slim_ctrl_remove_device(struct device *dev, void *null)
+{
+	slim_remove_device(to_slim_device(dev));
+	return 0;
+}
+
+/**
+ * slim_unregister_controller() - Controller tear-down.
+ *
+ * @ctrl: Controller to tear-down.
+ */
+int slim_unregister_controller(struct slim_controller *ctrl)
+{
+	/* Remove all clients */
+	device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
+	/* Enter Clock Pause */
+	slim_ctrl_clk_pause(ctrl, false, 0);
+	ida_simple_remove(&ctrl_ida, ctrl->id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_unregister_controller);
+
+static void slim_device_update_status(struct slim_device *sbdev,
+				      enum slim_device_status status)
+{
+	struct slim_driver *sbdrv;
+
+	if (sbdev->status == status)
+		return;
+
+	sbdev->status = status;
+	if (!sbdev->dev.driver)
+		return;
+
+	sbdrv = to_slim_driver(sbdev->dev.driver);
+	if (sbdrv->device_status)
+		sbdrv->device_status(sbdev, sbdev->status);
+}
+
+/**
+ * slim_report_absent() - Controller calls this function when a device
+ *	reports absent, or when the device cannot be communicated with
+ *
+ * @sbdev: Device that cannot be reached, or that sent a report-absent message
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+
+	if (!ctrl)
+		return;
+
+	/* invalidate logical addresses */
+	mutex_lock(&ctrl->lock);
+	sbdev->is_laddr_valid = false;
+	mutex_unlock(&ctrl->lock);
+
+	ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+	slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
+}
+EXPORT_SYMBOL_GPL(slim_report_absent);
+
+static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
+{
+	return (a->manf_id == b->manf_id &&
+		a->prod_code == b->prod_code &&
+		a->dev_index == b->dev_index &&
+		a->instance == b->instance);
+}
+
+static int slim_match_dev(struct device *dev, void *data)
+{
+	struct slim_eaddr *e_addr = data;
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	return slim_eaddr_equal(&sbdev->e_addr, e_addr);
+}
+
+static struct slim_device *find_slim_device(struct slim_controller *ctrl,
+					    struct slim_eaddr *eaddr)
+{
+	struct slim_device *sbdev;
+	struct device *dev;
+
+	dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
+	if (dev) {
+		sbdev = to_slim_device(dev);
+		return sbdev;
+	}
+
+	return NULL;
+}
+
+/**
+ * slim_get_device() - get handle to a device.
+ *
+ * @ctrl: Controller on which this device will be added/queried
+ * @e_addr: Enumeration address of the device to be queried
+ *
+ * Return: pointer to a device if it has already reported present. Creates a
+ * new device and returns a pointer to it if the device has not yet enumerated.
+ */
+struct slim_device *slim_get_device(struct slim_controller *ctrl,
+				    struct slim_eaddr *e_addr)
+{
+	struct slim_device *sbdev;
+
+	sbdev = find_slim_device(ctrl, e_addr);
+	if (!sbdev) {
+		sbdev = slim_alloc_device(ctrl, e_addr, NULL);
+		if (!sbdev)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return sbdev;
+}
+EXPORT_SYMBOL_GPL(slim_get_device);
+
+static int of_slim_match_dev(struct device *dev, void *data)
+{
+	struct device_node *np = data;
+	struct slim_device *sbdev = to_slim_device(dev);
+
+	return (sbdev->dev.of_node == np);
+}
+
+static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
+					       struct device_node *np)
+{
+	struct slim_device *sbdev;
+	struct device *dev;
+
+	dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
+	if (dev) {
+		sbdev = to_slim_device(dev);
+		return sbdev;
+	}
+
+	return NULL;
+}
+
+/**
+ * of_slim_get_device() - get handle to a device using dt node.
+ *
+ * @ctrl: Controller on which this device will be added/queried
+ * @np: node pointer to device
+ *
+ * Return: pointer to a device if it has already reported present, or NULL
+ * if the device has not yet enumerated.
+ */
+struct slim_device *of_slim_get_device(struct slim_controller *ctrl,
+				       struct device_node *np)
+{
+	return of_find_slim_device(ctrl, np);
+}
+EXPORT_SYMBOL_GPL(of_slim_get_device);
+
+static int slim_device_alloc_laddr(struct slim_device *sbdev,
+				   bool report_present)
+{
+	struct slim_controller *ctrl = sbdev->ctrl;
+	u8 laddr;
+	int ret;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->get_laddr) {
+		ret = ctrl->get_laddr(ctrl, &sbdev->e_addr, &laddr);
+		if (ret < 0)
+			goto err;
+	} else if (report_present) {
+		ret = ida_simple_get(&ctrl->laddr_ida,
+				     0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+		if (ret < 0)
+			goto err;
+
+		laddr = ret;
+	} else {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (ctrl->set_laddr) {
+		ret = ctrl->set_laddr(ctrl, &sbdev->e_addr, laddr);
+		if (ret) {
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	sbdev->laddr = laddr;
+	sbdev->is_laddr_valid = true;
+
+	slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
+
+	dev_dbg(ctrl->dev, "setting slimbus l-addr:%x, ea:%x,%x,%x,%x\n",
+		laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
+		sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+
+err:
+	mutex_unlock(&ctrl->lock);
+	return ret;
+
+}
+
+/**
+ * slim_device_report_present() - Report enumerated device.
+ *
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: Enumeration address of the device.
+ * @laddr: Return logical address (if valid flag is false)
+ *
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * Function returns -EXFULL to indicate that all logical addresses are already
+ * taken.
+ */
+int slim_device_report_present(struct slim_controller *ctrl,
+			       struct slim_eaddr *e_addr, u8 *laddr)
+{
+	struct slim_device *sbdev;
+	int ret;
+
+	ret = pm_runtime_get_sync(ctrl->dev);
+
+	if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+		dev_err(ctrl->dev, "slim ctrl not active,state:%d, ret:%d\n",
+				    ctrl->sched.clk_state, ret);
+		goto slimbus_not_active;
+	}
+
+	sbdev = slim_get_device(ctrl, e_addr);
+	if (IS_ERR(sbdev))
+		return -ENODEV;
+
+	if (sbdev->is_laddr_valid) {
+		*laddr = sbdev->laddr;
+		return 0;
+	}
+
+	ret = slim_device_alloc_laddr(sbdev, true);
+
+slimbus_not_active:
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_device_report_present);
+
+/**
+ * slim_get_logical_addr() - get/allocate logical address of a SLIMbus device.
+ *
+ * @sbdev: client handle requesting the address.
+ *
+ * Return: zero if a logical address is valid or a new logical address
+ * has been assigned. error code in case of error.
+ */
+int slim_get_logical_addr(struct slim_device *sbdev)
+{
+	if (!sbdev->is_laddr_valid)
+		return slim_device_alloc_laddr(sbdev, false);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_get_logical_addr);
+
+static void __exit slimbus_exit(void)
+{
+	bus_unregister(&slimbus_bus);
+}
+module_exit(slimbus_exit);
+
+static int __init slimbus_init(void)
+{
+	return bus_register(&slimbus_bus);
+}
+postcore_initcall(slimbus_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SLIMbus core");
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
new file mode 100644
index 0000000..d587914
--- /dev/null
+++ b/drivers/slimbus/messaging.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/**
+ * slim_msg_response() - Deliver Message response received from a device to the
+ *			framework.
+ *
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ *
+ * Called by the controller to inform the framework about a received response.
+ * This helps make the API asynchronous, so the controller driver doesn't need
+ * to manage one more table besides the framework-managed table that maps TIDs
+ * to buffers.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+	struct slim_msg_txn *txn;
+	struct slim_val_inf *msg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	txn = idr_find(&ctrl->tid_idr, tid);
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+	if (txn == NULL)
+		return;
+
+	msg = txn->msg;
+	if (msg == NULL || msg->rbuf == NULL) {
+		dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
+				tid, len);
+		return;
+	}
+
+	slim_free_txn_tid(ctrl, txn);
+	memcpy(msg->rbuf, reply, len);
+	if (txn->comp)
+		complete(txn->comp);
+
+	/* Remove runtime-pm vote now that response was received for TID txn */
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(slim_msg_response);
+
+/**
+ * slim_alloc_txn_tid() - Allocate a tid to txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction to be allocated with tid.
+ *
+ * Return: zero on success with valid txn->tid and error code on failures.
+ */
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+				SLIM_MAX_TIDS, GFP_ATOMIC);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+		return ret;
+	}
+	txn->tid = ret;
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_txn_tid);
+
+/**
+ * slim_free_txn_tid() - Free the tid of a txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction whose tid should be freed
+ */
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	idr_remove(&ctrl->tid_idr, txn->tid);
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+}
+EXPORT_SYMBOL_GPL(slim_free_txn_tid);
+
+/**
+ * slim_do_transfer() - Process a SLIMbus-messaging transaction
+ *
+ * @ctrl: Controller handle
+ * @txn: Transaction to be sent over SLIMbus
+ *
+ * Called by the controller to transmit messaging transactions not dealing
+ * with Interface/Value elements (e.g. transmitting a message to assign a
+ * logical address to a slave device).
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ *	(e.g. due to bus lines not being clocked or driven by controller)
+ */
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	bool need_tid = false, clk_pause_msg = false;
+	int ret, timeout;
+
+	/*
+	 * do not vote for runtime-PM if the transactions are part of clock
+	 * pause sequence
+	 */
+	if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
+		(txn->mt == SLIM_MSG_MT_CORE &&
+		 txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+		clk_pause_msg = true;
+
+	if (!clk_pause_msg) {
+		ret = pm_runtime_get_sync(ctrl->dev);
+		if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+			dev_err(ctrl->dev, "ctrl wrong state:%d, ret:%d\n",
+				ctrl->sched.clk_state, ret);
+			goto slim_xfer_err;
+		}
+	}
+
+	need_tid = slim_tid_txn(txn->mt, txn->mc);
+
+	if (need_tid) {
+		ret = slim_alloc_txn_tid(ctrl, txn);
+		if (ret)
+			return ret;
+
+		if (!txn->msg->comp)
+			txn->comp = &done;
+		else
+			txn->comp = txn->msg->comp;
+	}
+
+	ret = ctrl->xfer_msg(ctrl, txn);
+
+	if (!ret && need_tid && !txn->msg->comp) {
+		unsigned long ms = txn->rl + HZ;
+
+		timeout = wait_for_completion_timeout(txn->comp,
+						      msecs_to_jiffies(ms));
+		if (!timeout) {
+			ret = -ETIMEDOUT;
+			slim_free_txn_tid(ctrl, txn);
+		}
+	}
+
+	if (ret)
+		dev_err(ctrl->dev, "Tx:MT:0x%x, MC:0x%x, LA:0x%x failed:%d\n",
+			txn->mt, txn->mc, txn->la, ret);
+
+slim_xfer_err:
+	if (!clk_pause_msg && (!need_tid  || ret == -ETIMEDOUT)) {
+		/*
+		 * remove runtime-pm vote if this was TX only, or
+		 * if there was error during this transaction
+		 */
+		pm_runtime_mark_last_busy(ctrl->dev);
+		pm_runtime_put_autosuspend(ctrl->dev);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_do_transfer);
+
+static int slim_val_inf_sanity(struct slim_controller *ctrl,
+			       struct slim_val_inf *msg, u8 mc)
+{
+	if (!msg || msg->num_bytes > 16 ||
+	    (msg->start_offset + msg->num_bytes) > 0xC00)
+		goto reterr;
+	switch (mc) {
+	case SLIM_MSG_MC_REQUEST_VALUE:
+	case SLIM_MSG_MC_REQUEST_INFORMATION:
+		if (msg->rbuf != NULL)
+			return 0;
+		break;
+
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		if (msg->wbuf != NULL)
+			return 0;
+		break;
+
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+		if (msg->rbuf != NULL && msg->wbuf != NULL)
+			return 0;
+		break;
+	}
+reterr:
+	if (msg)
+		dev_err(ctrl->dev, "Sanity check failed:msg:offset:0x%x, mc:%d\n",
+			msg->start_offset, mc);
+	return -EINVAL;
+}
+
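+/*
+ * Map a value/information element transfer length in bytes (clamped to
+ * 1..16) to the slice-size code that slim_xfer_msg() packs into the low
+ * bits of the element code (EC) field.
+ */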
+static u16 slim_slicesize(int code)
+{
+	static const u8 sizetocode[16] = {
+		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+	};
+
+	code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+
+	return sizetocode[code - 1];
+}
+
+/**
+ * slim_xfer_msg() - Transfer a value info message on slim device
+ *
+ * @sbdev: slim device to which this msg has to be transferred
+ * @msg: value info message pointer
+ * @mc: message code of the message
+ *
+ * Called by drivers which want to transfer value or info elements.
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ */
+int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
+		  u8 mc)
+{
+	DEFINE_SLIM_LDEST_TXN(txn_stack, mc, 6, sbdev->laddr, msg);
+	struct slim_msg_txn *txn = &txn_stack;
+	struct slim_controller *ctrl = sbdev->ctrl;
+	int ret;
+	u16 sl;
+
+	if (!ctrl)
+		return -EINVAL;
+
+	ret = slim_val_inf_sanity(ctrl, msg, mc);
+	if (ret)
+		return ret;
+
+	sl = slim_slicesize(msg->num_bytes);
+
+	dev_dbg(ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+		msg->start_offset, msg->num_bytes, mc, sl);
+
+	txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+	switch (mc) {
+	case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+	case SLIM_MSG_MC_CHANGE_VALUE:
+	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+	case SLIM_MSG_MC_CLEAR_INFORMATION:
+		txn->rl += msg->num_bytes;
+	default:
+		break;
+	}
+
+	if (slim_tid_txn(txn->mt, txn->mc))
+		txn->rl++;
+
+	return slim_do_transfer(ctrl, txn);
+}
+EXPORT_SYMBOL_GPL(slim_xfer_msg);
+
+static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
+			 size_t count, u8 *rbuf, u8 *wbuf)
+{
+	msg->start_offset = addr;
+	msg->num_bytes = count;
+	msg->rbuf = rbuf;
+	msg->wbuf = wbuf;
+	msg->comp = NULL;
+}
+
+/**
+ * slim_read() - Read SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address of value element to read.
+ * @count: number of bytes to read. Maximum bytes allowed are 16.
+ * @val: buffer in which the value element's data is returned
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+	struct slim_val_inf msg;
+
+	slim_fill_msg(&msg, addr, count, val, NULL);
+
+	return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_REQUEST_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_read);
+
+/**
+ * slim_readb() - Read byte from SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address in the value element to read.
+ *
+ * Return: byte value of value element.
+ */
+int slim_readb(struct slim_device *sdev, u32 addr)
+{
+	int ret;
+	u8 buf;
+
+	ret = slim_read(sdev, addr, 1, &buf);
+	if (ret < 0)
+		return ret;
+	else
+		return buf;
+}
+EXPORT_SYMBOL_GPL(slim_readb);
+
+/**
+ * slim_write() - Write SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address in the value element to write.
+ * @count: number of bytes to write. Maximum bytes allowed are 16.
+ * @val: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+	struct slim_val_inf msg;
+
+	slim_fill_msg(&msg, addr, count,  NULL, val);
+
+	return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_write);
+
+/**
+ * slim_writeb() - Write byte to SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr:  address of value element to write.
+ * @value: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ *
+ */
+int slim_writeb(struct slim_device *sdev, u32 addr, u8 value)
+{
+	return slim_write(sdev, addr, 1, &value);
+}
+EXPORT_SYMBOL_GPL(slim_writeb);
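+/*
+ * Illustrative usage from a client driver (the register offset 0x40 below
+ * is arbitrary): resolve the logical address once, then access value
+ * elements through the byte helpers.
+ *
+ *	int ret = slim_get_logical_addr(sbdev);
+ *
+ *	if (!ret)
+ *		ret = slim_writeb(sbdev, 0x40, 0x01);
+ *	if (!ret)
+ *		ret = slim_readb(sbdev, 0x40);
+ *
+ * where the final call returns the byte read or a negative error code.
+ */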
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
new file mode 100644
index 0000000..db1f513
--- /dev/null
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/* Manager registers */
+#define	MGR_CFG		0x200
+#define	MGR_STATUS	0x204
+#define	MGR_INT_EN	0x210
+#define	MGR_INT_STAT	0x214
+#define	MGR_INT_CLR	0x218
+#define	MGR_TX_MSG	0x230
+#define	MGR_RX_MSG	0x270
+#define	MGR_IE_STAT	0x2F0
+#define	MGR_VE_STAT	0x300
+#define	MGR_CFG_ENABLE	1
+
+/* Framer registers */
+#define	FRM_CFG		0x400
+#define	FRM_STAT	0x404
+#define	FRM_INT_EN	0x410
+#define	FRM_INT_STAT	0x414
+#define	FRM_INT_CLR	0x418
+#define	FRM_WAKEUP	0x41C
+#define	FRM_CLKCTL_DONE	0x420
+#define	FRM_IE_STAT	0x430
+#define	FRM_VE_STAT	0x440
+
+/* Interface registers */
+#define	INTF_CFG	0x600
+#define	INTF_STAT	0x604
+#define	INTF_INT_EN	0x610
+#define	INTF_INT_STAT	0x614
+#define	INTF_INT_CLR	0x618
+#define	INTF_IE_STAT	0x630
+#define	INTF_VE_STAT	0x640
+
+/* Interrupt status bits */
+#define	MGR_INT_TX_NACKED_2	BIT(25)
+#define	MGR_INT_MSG_BUF_CONTE	BIT(26)
+#define	MGR_INT_RX_MSG_RCVD	BIT(30)
+#define	MGR_INT_TX_MSG_SENT	BIT(31)
+
+/* Framer config register settings */
+#define	FRM_ACTIVE	1
+#define	CLK_GEAR	7
+#define	ROOT_FREQ	11
+#define	REF_CLK_GEAR	15
+#define	INTR_WAKE	19
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
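+/*
+ * Per the shifts above, the assembled first word carries the message length
+ * (RL) at bit 0, the message type (MT) at bit 5, the message code (MC) at
+ * bit 8, the destination type (DT) at bit 15 and the destination address at
+ * bit 16.
+ */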
+
+#define SLIM_ROOT_FREQ 24576000
+#define QCOM_SLIM_AUTOSUSPEND 1000
+
+/* MAX message size over control channel */
+#define SLIM_MSGQ_BUF_LEN	40
+#define QCOM_TX_MSGS 2
+#define QCOM_RX_MSGS	8
+#define QCOM_BUF_ALLOC_RETRIES	10
+
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+/* V2 Component registers */
+#define CFG_PORT_V2(r) ((r ## _V2))
+#define	COMP_CFG_V2		4
+#define	COMP_TRUST_CFG_V2	0x3000
+
+/* V1 Component registers */
+#define CFG_PORT_V1(r) ((r ## _V1))
+#define	COMP_CFG_V1		0
+#define	COMP_TRUST_CFG_V1	0x14
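+/*
+ * CFG_PORT(reg, ver) resolves to the V2 register offset when the component
+ * version read at probe time is non-zero, and to the V1 offset otherwise,
+ * so the same symbolic register name can be used for both layouts.
+ */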
+
+/* Resource group info for manager, and non-ported generic device-components */
+#define EE_MGR_RSC_GRP	(1 << 10)
+#define EE_NGD_2	(2 << 6)
+#define EE_NGD_1	0
+
+struct slim_ctrl_buf {
+	void		*base;
+	spinlock_t	lock;
+	int		head;
+	int		tail;
+	int		sl_sz;
+	int		n;
+};
+
+struct qcom_slim_ctrl {
+	struct slim_controller  ctrl;
+	struct slim_framer	framer;
+	struct device		*dev;
+	void __iomem		*base;
+	void __iomem		*slew_reg;
+
+	struct slim_ctrl_buf	rx;
+	struct slim_ctrl_buf	tx;
+
+	struct completion	**wr_comp;
+	int			irq;
+	struct workqueue_struct *rxwq;
+	struct work_struct	wd;
+	struct clk		*rclk;
+	struct clk		*hclk;
+};
+
+static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
+			       u8 len, u32 tx_reg)
+{
+	int count = (len + 3) >> 2;
+
+	__iowrite32_copy(ctrl->base + tx_reg, buf, count);
+
+	/* Ensure order of subsequent writes */
+	mb();
+}
+
+static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
+{
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->rx.lock, flags);
+	if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
+		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+		dev_err(ctrl->dev, "RX QUEUE full!");
+		return NULL;
+	}
+	idx = ctrl->rx.tail;
+	ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
+	spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+	return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
+}
+
+static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
+{
+	struct completion *comp;
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->tx.lock, flags);
+	idx = ctrl->tx.head;
+	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
+	spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+	comp = ctrl->wr_comp[idx];
+	ctrl->wr_comp[idx] = NULL;
+
+	complete(comp);
+}
+
+static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
+					   u32 stat)
+{
+	int err = 0;
+
+	if (stat & MGR_INT_TX_MSG_SENT)
+		writel_relaxed(MGR_INT_TX_MSG_SENT,
+			       ctrl->base + MGR_INT_CLR);
+
+	if (stat & MGR_INT_TX_NACKED_2) {
+		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
+		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
+		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
+		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
+		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
+		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
+		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
+		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
+		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
+
+		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
+
+		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
+			stat, mgr_stat);
+		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
+		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
+			frm_intr_stat, frm_stat);
+		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
+			frm_cfg, frm_ie_stat);
+		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
+			intf_intr_stat, intf_stat);
+		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
+			intf_ie_stat);
+		err = -ENOTCONN;
+	}
+
+	slim_ack_txn(ctrl, err);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
+					   u32 stat)
+{
+	u32 *rx_buf, pkt[10];
+	bool q_rx = false;
+	u8 mc, mt, len;
+
+	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
+	mt = SLIM_HEADER_GET_MT(pkt[0]);
+	len = SLIM_HEADER_GET_RL(pkt[0]);
+	mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
+
+	/*
+	 * this message cannot be handled by ISR, so
+	 * let work-queue handle it
+	 */
+	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
+		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
+		if (!rx_buf) {
+			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
+					pkt[0]);
+			goto rx_ret_irq;
+		}
+		rx_buf[0] = pkt[0];
+
+	} else {
+		rx_buf = pkt;
+	}
+
+	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
+			DIV_ROUND_UP(len, 4));
+
+	switch (mc) {
+
+	case SLIM_MSG_MC_REPORT_PRESENT:
+		q_rx = true;
+		break;
+	case SLIM_MSG_MC_REPLY_INFORMATION:
+	case SLIM_MSG_MC_REPLY_VALUE:
+		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
+				  (u8)(*rx_buf >> 24), (len - 4));
+		break;
+	default:
+		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
+			mc, mt);
+		break;
+	}
+rx_ret_irq:
+	writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
+		       MGR_INT_CLR);
+	if (q_rx)
+		queue_work(ctrl->rxwq, &ctrl->wd);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_interrupt(int irq, void *d)
+{
+	struct qcom_slim_ctrl *ctrl = d;
+	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
+	int ret = IRQ_NONE;
+
+	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
+		ret = qcom_slim_handle_tx_irq(ctrl, stat);
+
+	if (stat & MGR_INT_RX_MSG_RCVD)
+		ret = qcom_slim_handle_rx_irq(ctrl, stat);
+
+	return ret;
+}
+
+static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+
+	clk_prepare_enable(ctrl->hclk);
+	clk_prepare_enable(ctrl->rclk);
+	enable_irq(ctrl->irq);
+
+	writel_relaxed(1, ctrl->base + FRM_WAKEUP);
+	/* Make sure framer wakeup write goes through before ISR fires */
+	mb();
+	/*
+	 * HW Workaround: Currently, the slave reports lost-sync messages
+	 * after SLIMbus comes out of clock pause.
+	 * Transactions with the slave fail before the slave reports that
+	 * message, so give some time for that report to come.
+	 * SLIMbus wakes up in clock gear 10 at 24.576 MHz. With each
+	 * superframe being 250 usecs, we wait for 5-10 superframes here to
+	 * ensure we get the message.
+	 */
+	usleep_range(1250, 2500);
+	return 0;
+}
+
+static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
+			      struct slim_msg_txn *txn,
+			      struct completion *done)
+{
+	unsigned long flags;
+	int idx;
+
+	spin_lock_irqsave(&ctrl->tx.lock, flags);
+	if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
+		spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+		dev_err(ctrl->dev, "controller TX buf unavailable");
+		return NULL;
+	}
+	idx = ctrl->tx.tail;
+	ctrl->wr_comp[idx] = done;
+	ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
+
+	spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+	return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
+}
+
+
+static int qcom_xfer_msg(struct slim_controller *sctrl,
+			 struct slim_msg_txn *txn)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+	DECLARE_COMPLETION_ONSTACK(done);
+	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+	unsigned long ms = txn->rl + HZ;
+	u8 *puc;
+	int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
+	u8 la = txn->la;
+	u32 *head;
+	/* HW expects length field to be excluded */
+	txn->rl--;
+
+	/* spin till buffer is made available */
+	if (!pbuf) {
+		while (retries--) {
+			usleep_range(10000, 15000);
+			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+			if (pbuf)
+				break;
+		}
+	}
+
+	if (retries < 0 && !pbuf)
+		return -ENOMEM;
+
+	puc = (u8 *)pbuf;
+	head = (u32 *)pbuf;
+
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+						txn->mc, 0, la);
+		puc += 3;
+	} else {
+		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+						txn->mc, 1, la);
+		puc += 2;
+	}
+
+	if (slim_tid_txn(txn->mt, txn->mc))
+		*(puc++) = txn->tid;
+
+	if (slim_ec_txn(txn->mt, txn->mc)) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8) & 0xFF;
+	}
+
+	if (txn->msg && txn->msg->wbuf)
+		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
+	timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
+
+	if (!timeout) {
+		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+					txn->mt);
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
+
+}
+
+static int qcom_set_laddr(struct slim_controller *sctrl,
+				struct slim_eaddr *ead, u8 laddr)
+{
+	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+	struct {
+		__be16 manf_id;
+		__be16 prod_code;
+		u8 dev_index;
+		u8 instance;
+		u8 laddr;
+	} __packed p;
+	struct slim_val_inf msg = {0};
+	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+			      10, laddr, &msg);
+	int ret;
+
+	p.manf_id = cpu_to_be16(ead->manf_id);
+	p.prod_code = cpu_to_be16(ead->prod_code);
+	p.dev_index = ead->dev_index;
+	p.instance = ead->instance;
+	p.laddr = laddr;
+
+	msg.wbuf = (void *)&p;
+	msg.num_bytes = 7;
+	ret = slim_do_transfer(&ctrl->ctrl, &txn);
+
+	if (ret)
+		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
+				  laddr, ret);
+	return ret;
+}
+
+static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->rx.lock, flags);
+	if (ctrl->rx.tail == ctrl->rx.head) {
+		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+		return -ENODATA;
+	}
+	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
+				ctrl->rx.sl_sz);
+
+	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
+	spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+	return 0;
+}
+
+static void qcom_slim_rxwq(struct work_struct *work)
+{
+	u8 buf[SLIM_MSGQ_BUF_LEN];
+	u8 mc, mt;
+	int ret;
+	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
+						 wd);
+
+	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
+		mt = SLIM_HEADER_GET_MT(buf[0]);
+		mc = SLIM_HEADER_GET_MC(buf[1]);
+		if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_PRESENT) {
+			struct slim_eaddr ea;
+			u8 laddr;
+
+			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
+			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
+			ea.dev_index = buf[6];
+			ea.instance = buf[7];
+
+			ret = slim_device_report_present(&ctrl->ctrl, &ea,
+							 &laddr);
+			if (ret < 0)
+				dev_err(ctrl->dev, "assign laddr failed:%d\n",
+					ret);
+		} else {
+			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
+				mc, mt);
+		}
+	}
+}
+
+static void qcom_slim_prg_slew(struct platform_device *pdev,
+				struct qcom_slim_ctrl *ctrl)
+{
+	struct resource	*slew_mem;
+
+	if (!ctrl->slew_reg) {
+		/* SLEW RATE register for this SLIMbus */
+		slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"slew");
+		ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start,
+				resource_size(slew_mem));
+		if (!ctrl->slew_reg)
+			return;
+	}
+
+	writel_relaxed(1, ctrl->slew_reg);
+	/* Make sure SLIMbus-slew rate enabling goes through */
+	wmb();
+}
+
+static int qcom_slim_probe(struct platform_device *pdev)
+{
+	struct qcom_slim_ctrl *ctrl;
+	struct slim_controller *sctrl;
+	struct resource *slim_mem;
+	int ret, ver;
+
+	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(ctrl->hclk))
+		return PTR_ERR(ctrl->hclk);
+
+	ctrl->rclk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(ctrl->rclk))
+		return PTR_ERR(ctrl->rclk);
+
+	ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
+	if (ret) {
+		dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
+		return ret;
+	}
+
+	ctrl->irq = platform_get_irq(pdev, 0);
+	if (!ctrl->irq) {
+		dev_err(&pdev->dev, "no slimbus IRQ\n");
+		return -ENODEV;
+	}
+
+	sctrl = &ctrl->ctrl;
+	sctrl->dev = &pdev->dev;
+	ctrl->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctrl);
+	dev_set_drvdata(ctrl->dev, ctrl);
+
+	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+	ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
+	if (IS_ERR(ctrl->base)) {
+		dev_err(&pdev->dev, "IOremap failed\n");
+		return PTR_ERR(ctrl->base);
+	}
+
+	sctrl->set_laddr = qcom_set_laddr;
+	sctrl->xfer_msg = qcom_xfer_msg;
+	sctrl->wakeup =  qcom_clk_pause_wakeup;
+	ctrl->tx.n = QCOM_TX_MSGS;
+	ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
+	ctrl->rx.n = QCOM_RX_MSGS;
+	ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
+	ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
+				GFP_KERNEL);
+	if (!ctrl->wr_comp)
+		return -ENOMEM;
+
+	spin_lock_init(&ctrl->rx.lock);
+	spin_lock_init(&ctrl->tx.lock);
+	INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
+	ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
+	if (!ctrl->rxwq) {
+		dev_err(ctrl->dev, "Failed to start Rx WQ\n");
+		return -ENOMEM;
+	}
+
+	ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
+	ctrl->framer.superfreq =
+		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+	sctrl->a_framer = &ctrl->framer;
+	sctrl->clkgear = SLIM_MAX_CLK_GEAR;
+
+	qcom_slim_prg_slew(pdev, ctrl);
+
+	ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
+				IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		goto err_request_irq_failed;
+	}
+
+	ret = clk_prepare_enable(ctrl->hclk);
+	if (ret)
+		goto err_hclk_enable_failed;
+
+	ret = clk_prepare_enable(ctrl->rclk);
+	if (ret)
+		goto err_rclk_enable_failed;
+
+	ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
+				     GFP_KERNEL);
+	if (!ctrl->tx.base) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
+				     GFP_KERNEL);
+	if (!ctrl->rx.base) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Register with framework before enabling frame, clock */
+	ret = slim_register_controller(&ctrl->ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "error adding controller\n");
+		goto err;
+	}
+
+	ver = readl_relaxed(ctrl->base);
+	/* Version info in 16 MSbits */
+	ver >>= 16;
+	/* Component register initialization */
+	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+	writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+				ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
+
+	writel((MGR_INT_TX_NACKED_2 |
+			MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+			MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
+	writel(1, ctrl->base + MGR_CFG);
+	/* Framer register initialization */
+	writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
+		(0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+		ctrl->base + FRM_CFG);
+	writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
+	writel(1, ctrl->base + INTF_CFG);
+	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
+	return 0;
+
+err:
+	clk_disable_unprepare(ctrl->rclk);
+err_rclk_enable_failed:
+	clk_disable_unprepare(ctrl->hclk);
+err_hclk_enable_failed:
+err_request_irq_failed:
+	destroy_workqueue(ctrl->rxwq);
+	return ret;
+}
+
+static int qcom_slim_remove(struct platform_device *pdev)
+{
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	slim_unregister_controller(&ctrl->ctrl);
+	destroy_workqueue(ctrl->rxwq);
+	return 0;
+}
+
+/*
+ * If runtime PM is not enabled, these two functions become helpers
+ * called from system suspend/resume.
+ */
+#ifdef CONFIG_PM
+static int qcom_slim_runtime_suspend(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+	int ret;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
+	if (ret) {
+		dev_err(device, "clk pause not entered:%d", ret);
+	} else {
+		disable_irq(ctrl->irq);
+		clk_disable_unprepare(ctrl->hclk);
+		clk_disable_unprepare(ctrl->rclk);
+	}
+	return ret;
+}
+
+static int qcom_slim_runtime_resume(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
+	if (ret)
+		dev_err(device, "clk pause not exited:%d", ret);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int qcom_slim_suspend(struct device *dev)
+{
+	int ret = 0;
+
+	if (!pm_runtime_enabled(dev) ||
+		(!pm_runtime_suspended(dev))) {
+		dev_dbg(dev, "system suspend");
+		ret = qcom_slim_runtime_suspend(dev);
+	}
+
+	return ret;
+}
+
+static int qcom_slim_resume(struct device *dev)
+{
+	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+		int ret;
+
+		dev_dbg(dev, "system resume");
+		ret = qcom_slim_runtime_resume(dev);
+		if (!ret) {
+			pm_runtime_mark_last_busy(dev);
+			pm_request_autosuspend(dev);
+		}
+		return ret;
+
+	}
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
+	SET_RUNTIME_PM_OPS(
+			   qcom_slim_runtime_suspend,
+			   qcom_slim_runtime_resume,
+			   NULL
+	)
+};
+
+static const struct of_device_id qcom_slim_dt_match[] = {
+	{ .compatible = "qcom,slim", },
+	{ .compatible = "qcom,apq8064-slim", },
+	{}
+};
+
+static struct platform_driver qcom_slim_driver = {
+	.probe = qcom_slim_probe,
+	.remove = qcom_slim_remove,
+	.driver	= {
+		.name = "qcom_slim_ctrl",
+		.of_match_table = qcom_slim_dt_match,
+		.pm = &qcom_slim_dev_pm_ops,
+	},
+};
+module_platform_driver(qcom_slim_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
new file mode 100644
index 0000000..14a9d18
--- /dev/null
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -0,0 +1,1524 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/slimbus.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/soc/qcom/qmi.h>
+#include <net/sock.h>
+#include "slimbus.h"
+
+/* NGD (Non-ported Generic Device) registers */
+#define	NGD_CFG			0x0
+#define	NGD_CFG_ENABLE		BIT(0)
+#define	NGD_CFG_RX_MSGQ_EN	BIT(1)
+#define	NGD_CFG_TX_MSGQ_EN	BIT(2)
+#define	NGD_STATUS		0x4
+#define NGD_LADDR		BIT(1)
+#define	NGD_RX_MSGQ_CFG		0x8
+#define	NGD_INT_EN		0x10
+#define	NGD_INT_RECFG_DONE	BIT(24)
+#define	NGD_INT_TX_NACKED_2	BIT(25)
+#define	NGD_INT_MSG_BUF_CONTE	BIT(26)
+#define	NGD_INT_MSG_TX_INVAL	BIT(27)
+#define	NGD_INT_IE_VE_CHG	BIT(28)
+#define	NGD_INT_DEV_ERR		BIT(29)
+#define	NGD_INT_RX_MSG_RCVD	BIT(30)
+#define	NGD_INT_TX_MSG_SENT	BIT(31)
+#define	NGD_INT_STAT		0x14
+#define	NGD_INT_CLR		0x18
+#define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \
+				NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \
+				NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \
+				NGD_INT_RX_MSG_RCVD)
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID	0x0301
+#define SLIMBUS_QMI_SVC_V1	1
+#define SLIMBUS_QMI_INS_ID	0
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01	0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01	0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01		0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01		0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ	0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP	0x0022
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN	14
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN	7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN	14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN	7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN	7
+/* QMI response timeout of 500ms */
+#define SLIMBUS_QMI_RESP_TOUT	1000
+
+/* User defined commands */
+#define SLIM_USR_MC_GENERIC_ACK	0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY	0x0
+#define SLIM_USR_MC_REPORT_SATELLITE	0x1
+#define SLIM_USR_MC_ADDR_QUERY		0xD
+#define SLIM_USR_MC_ADDR_REPLY		0xE
+#define SLIM_USR_MC_DEFINE_CHAN		0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN	0x21
+#define SLIM_USR_MC_CHAN_CTRL		0x23
+#define SLIM_USR_MC_RECONFIG_NOW	0x24
+#define SLIM_USR_MC_REQ_BW		0x28
+#define SLIM_USR_MC_CONNECT_SRC		0x2C
+#define SLIM_USR_MC_CONNECT_SINK	0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT	0x2E
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE	0x0
+
+#define QCOM_SLIM_NGD_AUTOSUSPEND	MSEC_PER_SEC
+#define SLIM_RX_MSGQ_TIMEOUT_VAL	0x10000
+
+#define SLIM_LA_MGR	0xFF
+#define SLIM_ROOT_FREQ	24576000
+#define LADDR_RETRY	5
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN	40
+#define QCOM_SLIM_NGD_DESC_NUM	32
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+
+#define INIT_MX_RETRIES 10
+#define DEF_RETRY_MS	10
+#define SAT_MAGIC_LSB	0xD9
+#define SAT_MAGIC_MSB	0xC5
+#define SAT_MSG_VER	0x1
+#define SAT_MSG_PROT	0x1
+#define to_ngd(d)	container_of(d, struct qcom_slim_ngd, dev)
+
+struct ngd_reg_offset_data {
+	u32 offset, size;
+};
+
+static const struct ngd_reg_offset_data ngd_v1_5_offset_info = {
+	.offset = 0x1000,
+	.size = 0x1000,
+};
+
+enum qcom_slim_ngd_state {
+	QCOM_SLIM_NGD_CTRL_AWAKE,
+	QCOM_SLIM_NGD_CTRL_IDLE,
+	QCOM_SLIM_NGD_CTRL_ASLEEP,
+	QCOM_SLIM_NGD_CTRL_DOWN,
+};
+
+struct qcom_slim_ngd_qmi {
+	struct qmi_handle qmi;
+	struct sockaddr_qrtr svc_info;
+	struct qmi_handle svc_event_hdl;
+	struct qmi_response_type_v01 resp;
+	struct qmi_handle *handle;
+	struct completion qmi_comp;
+};
+
+struct qcom_slim_ngd_ctrl;
+struct qcom_slim_ngd;
+
+struct qcom_slim_ngd_dma_desc {
+	struct dma_async_tx_descriptor *desc;
+	struct qcom_slim_ngd_ctrl *ctrl;
+	struct completion *comp;
+	dma_cookie_t cookie;
+	dma_addr_t phys;
+	void *base;
+};
+
+struct qcom_slim_ngd {
+	struct platform_device *pdev;
+	void __iomem *base;
+	int id;
+};
+
+struct qcom_slim_ngd_ctrl {
+	struct slim_framer framer;
+	struct slim_controller ctrl;
+	struct qcom_slim_ngd_qmi qmi;
+	struct qcom_slim_ngd *ngd;
+	struct device *dev;
+	void __iomem *base;
+	struct dma_chan *dma_rx_channel;
+	struct dma_chan	*dma_tx_channel;
+	struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM];
+	struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
+	struct completion reconf;
+	struct work_struct m_work;
+	struct workqueue_struct *mwq;
+	spinlock_t tx_buf_lock;
+	enum qcom_slim_ngd_state state;
+	dma_addr_t rx_phys_base;
+	dma_addr_t tx_phys_base;
+	void *rx_base;
+	void *tx_base;
+	int tx_tail;
+	int tx_head;
+	u32 ver;
+};
+
+enum slimbus_mode_enum_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_MODE_SATELLITE_V01 = 1,
+	SLIMBUS_MODE_MASTER_V01 = 2,
+	SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	SLIMBUS_PM_INACTIVE_V01 = 1,
+	SLIMBUS_PM_ACTIVE_V01 = 2,
+	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_resp_enum_type_v01 {
+	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+	uint32_t instance;
+	uint8_t mode_valid;
+	enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+	enum slimbus_pm_enum_type_v01 pm_req;
+	uint8_t resp_type_valid;
+	enum slimbus_resp_enum_type_v01 resp_type;
+};
+
+struct slimbus_power_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
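+/*
+ * QMI encode/decode tables: each qmi_elem_info entry below describes one
+ * TLV element (its wire type, length and offset within the corresponding
+ * structure) so that the generic QMI helpers can marshal the request and
+ * response structures defined above.
+ */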
+static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+	{
+		.data_type  = QMI_UNSIGNED_4_BYTE,
+		.elem_len   = 1,
+		.elem_size  = sizeof(uint32_t),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x01,
+		.offset     = offsetof(struct slimbus_select_inst_req_msg_v01,
+				       instance),
+		.ei_array   = NULL,
+	},
+	{
+		.data_type  = QMI_OPT_FLAG,
+		.elem_len   = 1,
+		.elem_size  = sizeof(uint8_t),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x10,
+		.offset     = offsetof(struct slimbus_select_inst_req_msg_v01,
+				       mode_valid),
+		.ei_array   = NULL,
+	},
+	{
+		.data_type  = QMI_UNSIGNED_4_BYTE,
+		.elem_len   = 1,
+		.elem_size  = sizeof(enum slimbus_mode_enum_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x10,
+		.offset     = offsetof(struct slimbus_select_inst_req_msg_v01,
+				       mode),
+		.ei_array   = NULL,
+	},
+	{
+		.data_type  = QMI_EOTI,
+		.elem_len   = 0,
+		.elem_size  = 0,
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x00,
+		.offset     = 0,
+		.ei_array   = NULL,
+	},
+};
+
+static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+	{
+		.data_type  = QMI_STRUCT,
+		.elem_len   = 1,
+		.elem_size  = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x02,
+		.offset     = offsetof(struct slimbus_select_inst_resp_msg_v01,
+				       resp),
+		.ei_array   = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type  = QMI_EOTI,
+		.elem_len   = 0,
+		.elem_size  = 0,
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x00,
+		.offset     = 0,
+		.ei_array   = NULL,
+	},
+};
+
+static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
+	{
+		.data_type  = QMI_UNSIGNED_4_BYTE,
+		.elem_len   = 1,
+		.elem_size  = sizeof(enum slimbus_pm_enum_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x01,
+		.offset     = offsetof(struct slimbus_power_req_msg_v01,
+				       pm_req),
+		.ei_array   = NULL,
+	},
+	{
+		.data_type  = QMI_OPT_FLAG,
+		.elem_len   = 1,
+		.elem_size  = sizeof(uint8_t),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x10,
+		.offset     = offsetof(struct slimbus_power_req_msg_v01,
+				       resp_type_valid),
+	},
+	{
+		.data_type  = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len   = 1,
+		.elem_size  = sizeof(enum slimbus_resp_enum_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x10,
+		.offset     = offsetof(struct slimbus_power_req_msg_v01,
+				       resp_type),
+	},
+	{
+		.data_type  = QMI_EOTI,
+		.elem_len   = 0,
+		.elem_size  = 0,
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x00,
+		.offset     = 0,
+		.ei_array   = NULL,
+	},
+};
+
+static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
+	{
+		.data_type  = QMI_STRUCT,
+		.elem_len   = 1,
+		.elem_size  = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x02,
+		.offset     = offsetof(struct slimbus_power_resp_msg_v01, resp),
+		.ei_array   = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type  = QMI_EOTI,
+		.elem_len   = 0,
+		.elem_size  = 0,
+		.array_type = NO_ARRAY,
+		.tlv_type   = 0x00,
+		.offset     = 0,
+		.ei_array   = NULL,
+	},
+};
+
+static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl,
+				struct slimbus_select_inst_req_msg_v01 *req)
+{
+	struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+	struct qmi_txn txn;
+	int rc;
+
+	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+				slimbus_select_inst_resp_msg_v01_ei, &resp);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+		return rc;
+	}
+
+	rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+				SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01,
+				SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN,
+				slimbus_select_inst_req_msg_v01_ei, req);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+		qmi_txn_cancel(&txn);
+		return rc;
+	}
+
+	rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+			resp.resp.result);
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle,
+					struct sockaddr_qrtr *sq,
+					struct qmi_txn *txn, const void *data)
+{
+	struct slimbus_power_resp_msg_v01 *resp;
+
+	resp = (struct slimbus_power_resp_msg_v01 *)data;
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+		pr_err("QMI power request failed 0x%x\n",
+				resp->resp.result);
+
+	complete(&txn->completion);
+}
+
+static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+					struct slimbus_power_req_msg_v01 *req)
+{
+	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+	struct qmi_txn txn;
+	int rc;
+
+	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+				slimbus_power_resp_msg_v01_ei, &resp);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+		return rc;
+	}
+
+	rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+				SLIMBUS_QMI_POWER_REQ_V01,
+				SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+				slimbus_power_req_msg_v01_ei, req);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+		qmi_txn_cancel(&txn);
+		return rc;
+	}
+
+	rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+			resp.resp.result);
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
+
+static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
+	{
+		.type = QMI_RESPONSE,
+		.msg_id = SLIMBUS_QMI_POWER_RESP_V01,
+		.ei = slimbus_power_resp_msg_v01_ei,
+		.decoded_size = sizeof(struct slimbus_power_resp_msg_v01),
+		.fn = qcom_slim_qmi_power_resp_cb,
+	},
+	{}
+};
+
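+/*
+ * Bring up the QMI link to the remote SLIMbus service: initialise a QMI
+ * handle, connect it to the service address stored in ctrl->qmi.svc_info,
+ * and send a "select instance" request that conveys which side (ADSP or
+ * application processor) acts as the SLIMbus master for this NGD instance.
+ */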
+static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl,
+			      bool apps_is_master)
+{
+	struct slimbus_select_inst_req_msg_v01 req;
+	struct qmi_handle *handle;
+	int rc;
+
+	handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	rc = qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+				NULL, qcom_slim_qmi_msg_handlers);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI client init failed: %d\n", rc);
+		goto qmi_handle_init_failed;
+	}
+
+	rc = kernel_connect(handle->sock,
+				(struct sockaddr *)&ctrl->qmi.svc_info,
+				sizeof(ctrl->qmi.svc_info), 0);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc);
+		goto qmi_connect_to_service_failed;
+	}
+
+	/* Instance is 0 based */
+	req.instance = (ctrl->ngd->id >> 1);
+	req.mode_valid = 1;
+
+	/* Mode indicates the role of the ADSP */
+	if (apps_is_master)
+		req.mode = SLIMBUS_MODE_SATELLITE_V01;
+	else
+		req.mode = SLIMBUS_MODE_MASTER_V01;
+
+	ctrl->qmi.handle = handle;
+
+	rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
+	if (rc) {
+		dev_err(ctrl->dev, "failed to select h/w instance\n");
+		goto qmi_select_instance_failed;
+	}
+
+	return 0;
+
+qmi_select_instance_failed:
+	ctrl->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+	qmi_handle_release(handle);
+qmi_handle_init_failed:
+	devm_kfree(ctrl->dev, handle);
+	return rc;
+}
+
+static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	if (!ctrl->qmi.handle)
+		return;
+
+	qmi_handle_release(ctrl->qmi.handle);
+	devm_kfree(ctrl->dev, ctrl->qmi.handle);
+	ctrl->qmi.handle = NULL;
+}
+
+static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+				       bool active)
+{
+	struct slimbus_power_req_msg_v01 req;
+
+	if (active)
+		req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+	else
+		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+	req.resp_type_valid = 0;
+
+	return qcom_slim_qmi_send_power_request(ctrl, &req);
+}
+
+static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int len,
+				     struct completion *comp)
+{
+	struct qcom_slim_ngd_dma_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
+	if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) {
+		spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+		return NULL;
+	}
+	desc  = &ctrl->txdesc[ctrl->tx_tail];
+	desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN;
+	desc->comp = comp;
+	ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM;
+
+	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+	return desc->base;
+}
+
+static void qcom_slim_ngd_tx_msg_dma_cb(void *args)
+{
+	struct qcom_slim_ngd_dma_desc *desc = args;
+	struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
+	if (desc->comp) {
+		complete(desc->comp);
+		desc->comp = NULL;
+	}
+
+	ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM;
+	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+}
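The TX path above treats txdesc[] as a single-producer ring: qcom_slim_ngd_tx_msg_get() claims the slot at tx_tail and the DMA completion callback retires the slot at tx_head, so "(tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == tx_head" is the full test and one slot is always left unused. A standalone sketch of that invariant only; N = 8 is an arbitrary stand-in for QCOM_SLIM_NGD_DESC_NUM, which is defined earlier in this driver:

#include <stdio.h>

#define DESC_NUM 8	/* stand-in for QCOM_SLIM_NGD_DESC_NUM */

int main(void)
{
	unsigned int head = 0, tail = 0, in_flight = 0, i;

	for (i = 0; i < 20; i++) {
		if ((tail + 1) % DESC_NUM == head) {
			/* ring full: the driver's _tx_msg_get() returns NULL here */
			printf("full with %u messages in flight\n", in_flight);
			head = (head + 1) % DESC_NUM;	/* DMA callback retires one */
			in_flight--;
		}
		tail = (tail + 1) % DESC_NUM;		/* producer claims a slot */
		in_flight++;
	}
	printf("usable slots: %d of %d\n", DESC_NUM - 1, DESC_NUM);
	return 0;
}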
+
+static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl,
+				     void *buf, int len)
+{
+	struct qcom_slim_ngd_dma_desc *desc;
+	unsigned long flags;
+	int index, offset;
+
+	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+	offset = buf - ctrl->tx_base;
+	index = offset/SLIM_MSGQ_BUF_LEN;
+
+	desc = &ctrl->txdesc[index];
+	desc->phys = ctrl->tx_phys_base + offset;
+	desc->base = ctrl->tx_base + offset;
+	desc->ctrl = ctrl;
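+	/* round the DMA length up to a 4-byte (32-bit message word) multiple */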
+	len = (len + 3) & 0xfc;
+
+	desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel,
+						desc->phys, len,
+						DMA_MEM_TO_DEV,
+						DMA_PREP_INTERRUPT);
+	if (!desc->desc) {
+		dev_err(ctrl->dev, "unable to prepare channel\n");
+		spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+		return -EINVAL;
+	}
+
+	desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb;
+	desc->desc->callback_param = desc;
+	desc->desc->cookie = dmaengine_submit(desc->desc);
+	dma_async_issue_pending(ctrl->dma_tx_channel);
+	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+	return 0;
+}
+
+static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
+{
+	u8 mc, mt, len;
+
+	mt = SLIM_HEADER_GET_MT(buf[0]);
+	len = SLIM_HEADER_GET_RL(buf[0]);
+	mc = SLIM_HEADER_GET_MC(buf[1]);
+
+	if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+		mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+		queue_work(ctrl->mwq, &ctrl->m_work);
+
+	if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+	    mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
+	    mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
+		(mc == SLIM_USR_MC_GENERIC_ACK &&
+		 mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
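+		/* buf[3] carries the transaction ID; the payload starts at buf[4] */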
+		slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
+		pm_runtime_mark_last_busy(ctrl->dev);
+	}
+}
+
+static void qcom_slim_ngd_rx_msgq_cb(void *args)
+{
+	struct qcom_slim_ngd_dma_desc *desc = args;
+	struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+
+	qcom_slim_ngd_rx(ctrl, (u8 *)desc->base);
+	/* Add descriptor back to the queue */
+	desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+					desc->phys, SLIM_MSGQ_BUF_LEN,
+					DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT);
+	if (!desc->desc) {
+		dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+		return;
+	}
+
+	desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+	desc->desc->callback_param = desc;
+	desc->desc->cookie = dmaengine_submit(desc->desc);
+	dma_async_issue_pending(ctrl->dma_rx_channel);
+}
+
+static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	struct qcom_slim_ngd_dma_desc *desc;
+	int i;
+
+	for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) {
+		desc = &ctrl->rx_desc[i];
+		desc->phys = ctrl->rx_phys_base + i * SLIM_MSGQ_BUF_LEN;
+		desc->ctrl = ctrl;
+		desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN;
+		desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+						desc->phys, SLIM_MSGQ_BUF_LEN,
+						DMA_DEV_TO_MEM,
+						DMA_PREP_INTERRUPT);
+		if (!desc->desc) {
+			dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+			return -EINVAL;
+		}
+
+		desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+		desc->desc->callback_param = desc;
+		desc->desc->cookie = dmaengine_submit(desc->desc);
+	}
+	dma_async_issue_pending(ctrl->dma_rx_channel);
+
+	return 0;
+}
+
+static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	struct device *dev = ctrl->dev;
+	int ret, size;
+
+	ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+	if (!ctrl->dma_rx_channel) {
+		dev_err(dev, "Failed to request dma channels");
+		return -EINVAL;
+	}
+
+	size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
+	ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
+					   GFP_KERNEL);
+	if (!ctrl->rx_base) {
+		dev_err(dev, "dma_alloc_coherent failed\n");
+		ret = -ENOMEM;
+		goto rel_rx;
+	}
+
+	ret = qcom_slim_ngd_post_rx_msgq(ctrl);
+	if (ret) {
+		dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret);
+		goto rx_post_err;
+	}
+
+	return 0;
+
+rx_post_err:
+	dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base);
+rel_rx:
+	dma_release_channel(ctrl->dma_rx_channel);
+	return ret;
+}
+
+static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	struct device *dev = ctrl->dev;
+	unsigned long flags;
+	int ret = 0;
+	int size;
+
+	ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+	if (!ctrl->dma_tx_channel) {
+		dev_err(dev, "Failed to request dma channels");
+		return -EINVAL;
+	}
+
+	size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
+	ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
+					   GFP_KERNEL);
+	if (!ctrl->tx_base) {
+		dev_err(dev, "dma_alloc_coherent failed\n");
+		ret = -ENOMEM;
+		goto rel_tx;
+	}
+
+	spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+	ctrl->tx_tail = 0;
+	ctrl->tx_head = 0;
+	spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+	return 0;
+rel_tx:
+	dma_release_channel(ctrl->dma_tx_channel);
+	return ret;
+}
+
+static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	int ret = 0;
+
+	ret = qcom_slim_ngd_init_rx_msgq(ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "rx dma init failed\n");
+		return ret;
+	}
+
+	ret = qcom_slim_ngd_init_tx_msgq(ctrl);
+	if (ret)
+		dev_err(ctrl->dev, "tx dma init failed\n");
+
+	return ret;
+}
+
+static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = d;
+	void __iomem *base = ctrl->ngd->base;
+	u32 stat = readl(base + NGD_INT_STAT);
+
+	if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+		(stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+		(stat & NGD_INT_TX_NACKED_2)) {
+		dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat);
+	}
+
+	writel(stat, base + NGD_INT_CLR);
+
+	return IRQ_HANDLED;
+}
+
+static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
+				  struct slim_msg_txn *txn)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+	DECLARE_COMPLETION_ONSTACK(tx_sent);
+	DECLARE_COMPLETION_ONSTACK(done);
+	int ret, timeout, i;
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	u8 rbuf[SLIM_MSGQ_BUF_LEN];
+	u32 *pbuf;
+	u8 *puc;
+	u8 la = txn->la;
+	bool usr_msg = false;
+
+	if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+		return -EPROTONOSUPPORT;
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+		 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+		return 0;
+
+	if (txn->dt == SLIM_MSG_DEST_ENUMADDR)
+		return -EPROTONOSUPPORT;
+
+	if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
+			txn->rl > SLIM_MSGQ_BUF_LEN) {
+		dev_err(ctrl->dev, "msg exceeds HW limit\n");
+		return -EINVAL;
+	}
+
+	pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
+	if (!pbuf) {
+		dev_err(ctrl->dev, "Message buffer unavailable\n");
+		return -ENOMEM;
+	}
+
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+		switch (txn->mc) {
+		case SLIM_MSG_MC_CONNECT_SOURCE:
+			txn->mc = SLIM_USR_MC_CONNECT_SRC;
+			break;
+		case SLIM_MSG_MC_CONNECT_SINK:
+			txn->mc = SLIM_USR_MC_CONNECT_SINK;
+			break;
+		case SLIM_MSG_MC_DISCONNECT_PORT:
+			txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		usr_msg = true;
+		i = 0;
+		wbuf[i++] = txn->la;
+		la = SLIM_LA_MGR;
+		wbuf[i++] = txn->msg->wbuf[0];
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			wbuf[i++] = txn->msg->wbuf[1];
+
+		txn->comp = &done;
+		ret = slim_alloc_txn_tid(sctrl, txn);
+		if (ret) {
+			dev_err(ctrl->dev, "Unable to allocate TID\n");
+			return ret;
+		}
+
+		wbuf[i++] = txn->tid;
+
+		txn->msg->num_bytes = i;
+		txn->msg->wbuf = wbuf;
+		txn->msg->rbuf = rbuf;
+		txn->rl = txn->msg->num_bytes + 4;
+	}
+
+	/* HW expects length field to be excluded */
+	txn->rl--;
+	puc = (u8 *)pbuf;
+	*pbuf = 0;
+	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+				la);
+		puc += 3;
+	} else {
+		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+				la);
+		puc += 2;
+	}
+
+	if (slim_tid_txn(txn->mt, txn->mc))
+		*(puc++) = txn->tid;
+
+	if (slim_ec_txn(txn->mt, txn->mc)) {
+		*(puc++) = (txn->ec & 0xFF);
+		*(puc++) = (txn->ec >> 8) & 0xFF;
+	}
+
+	if (txn->msg && txn->msg->wbuf)
+		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+	ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
+	if (ret)
+		return ret;
+
+	timeout = wait_for_completion_timeout(&tx_sent, HZ);
+	if (!timeout) {
+		dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+					txn->mt);
+		return -ETIMEDOUT;
+	}
+
+	if (usr_msg) {
+		timeout = wait_for_completion_timeout(&done, HZ);
+		if (!timeout) {
+			dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
+				txn->mc, txn->mt);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
+				       struct slim_msg_txn *txn)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int ret, timeout;
+
+	pm_runtime_get_sync(ctrl->dev);
+
+	txn->comp = &done;
+
+	ret = qcom_slim_ngd_xfer_msg(ctrl, txn);
+	if (ret)
+		return ret;
+
+	timeout = wait_for_completion_timeout(&done, HZ);
+	if (!timeout) {
+		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+				txn->mt);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
+{
+	struct slim_device *sdev = rt->dev;
+	struct slim_controller *ctrl = sdev->ctrl;
+	struct slim_val_inf msg =  {0};
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	u8 rbuf[SLIM_MSGQ_BUF_LEN];
+	struct slim_msg_txn txn = {0,};
+	int i, ret;
+
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.ec = 0;
+	txn.msg = &msg;
+	txn.msg->num_bytes = 0;
+	txn.msg->wbuf = wbuf;
+	txn.msg->rbuf = rbuf;
+
+	for (i = 0; i < rt->num_ports; i++) {
+		struct slim_port *port = &rt->ports[i];
+
+		if (txn.msg->num_bytes == 0) {
+			int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem;
+			int exp;
+
+			wbuf[txn.msg->num_bytes++] = sdev->laddr;
+			wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
+						   (port->ch.aux_fmt << 6);
+
+			/* Data channel segment interval not multiple of 3 */
+			exp = seg_interval % 3;
+			if (exp)
+				wbuf[txn.msg->num_bytes] |= BIT(5);
+
+			txn.msg->num_bytes++;
+			wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
+
+			if (rt->prot == SLIM_PROTO_ISO)
+				wbuf[txn.msg->num_bytes++] =
+						port->ch.prrate |
+						SLIM_CHANNEL_CONTENT_FL;
+			else
+				wbuf[txn.msg->num_bytes++] =  port->ch.prrate;
+
+			ret = slim_alloc_txn_tid(ctrl, &txn);
+			if (ret) {
+				dev_err(&sdev->dev, "Fail to allocate TID\n");
+				return -ENXIO;
+			}
+			wbuf[txn.msg->num_bytes++] = txn.tid;
+		}
+		wbuf[txn.msg->num_bytes++] = port->ch.id;
+	}
+
+	txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+	txn.rl = txn.msg->num_bytes + 4;
+	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+	if (ret) {
+		slim_free_txn_tid(ctrl, &txn);
+		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+				txn.mt);
+		return ret;
+	}
+
+	txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+	txn.msg->num_bytes = 2;
+	wbuf[1] = sdev->laddr;
+	txn.rl = txn.msg->num_bytes + 4;
+
+	ret = slim_alloc_txn_tid(ctrl, &txn);
+	if (ret) {
+		dev_err(ctrl->dev, "Fail to allocate TID\n");
+		return ret;
+	}
+
+	wbuf[0] = txn.tid;
+	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+	if (ret) {
+		slim_free_txn_tid(ctrl, &txn);
+		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+				txn.mt);
+	}
+
+	return ret;
+}
+
+static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
+				   struct slim_eaddr *ea, u8 *laddr)
+{
+	struct slim_val_inf msg =  {0};
+	struct slim_msg_txn txn;
+	u8 wbuf[10] = {0};
+	u8 rbuf[10] = {0};
+	int ret;
+
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.ec = 0;
+
+	txn.mc = SLIM_USR_MC_ADDR_QUERY;
+	txn.rl = 11;
+	txn.msg = &msg;
+	txn.msg->num_bytes = 7;
+	txn.msg->wbuf = wbuf;
+	txn.msg->rbuf = rbuf;
+
+	ret = slim_alloc_txn_tid(ctrl, &txn);
+	if (ret < 0)
+		return ret;
+
+	wbuf[0] = (u8)txn.tid;
+	memcpy(&wbuf[1], ea, sizeof(*ea));
+
+	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+	if (ret) {
+		slim_free_txn_tid(ctrl, &txn);
+		return ret;
+	}
+
+	*laddr = rbuf[6];
+
+	return ret;
+}
+
+static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	if (ctrl->dma_rx_channel) {
+		dmaengine_terminate_sync(ctrl->dma_rx_channel);
+		dma_release_channel(ctrl->dma_rx_channel);
+	}
+
+	if (ctrl->dma_tx_channel) {
+		dmaengine_terminate_sync(ctrl->dma_tx_channel);
+		dma_release_channel(ctrl->dma_tx_channel);
+	}
+
+	ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL;
+
+	return 0;
+}
+
+static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	u32 cfg = readl_relaxed(ctrl->ngd->base);
+
+	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+		qcom_slim_ngd_init_dma(ctrl);
+
+	/* By default enable message queues */
+	cfg |= NGD_CFG_RX_MSGQ_EN;
+	cfg |= NGD_CFG_TX_MSGQ_EN;
+
+	/* Enable NGD if it's not already enabled */
+	if (!(cfg & NGD_CFG_ENABLE))
+		cfg |= NGD_CFG_ENABLE;
+
+	writel_relaxed(cfg, ctrl->ngd->base);
+}
+
+static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	enum qcom_slim_ngd_state cur_state = ctrl->state;
+	struct qcom_slim_ngd *ngd = ctrl->ngd;
+	u32 laddr, rx_msgq;
+	int timeout, ret = 0;
+
+	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+		timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
+		if (!timeout)
+			return -EREMOTEIO;
+	}
+
+	if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP ||
+		ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+		ret = qcom_slim_qmi_power_request(ctrl, true);
+		if (ret) {
+			dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n",
+					ret);
+			return ret;
+		}
+	}
+
+	ctrl->ver = readl_relaxed(ctrl->base);
+	/* Version info in 16 MSbits */
+	ctrl->ver >>= 16;
+
+	laddr = readl_relaxed(ngd->base + NGD_STATUS);
+	if (laddr & NGD_LADDR) {
+		/*
+		 * external MDM restart case where ADSP itself was active framer
+		 * For example, modem restarted when playback was active
+		 */
+		if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) {
+			dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
+			return 0;
+		}
+		return 0;
+	}
+
+	writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN);
+	rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG);
+
+	writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+				ngd->base + NGD_RX_MSGQ_CFG);
+	qcom_slim_ngd_setup(ctrl);
+
+	timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
+	if (!timeout) {
+		dev_err(ctrl->dev, "capability exchange timed-out\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	struct slim_device *sbdev;
+	struct device_node *node;
+
+	for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
+		sbdev = of_slim_get_device(&ctrl->ctrl, node);
+		if (!sbdev)
+			continue;
+
+		if (slim_get_logical_addr(sbdev))
+			dev_err(ctrl->dev, "Failed to get logical address\n");
+	}
+}
+
+static void qcom_slim_ngd_master_worker(struct work_struct *work)
+{
+	struct qcom_slim_ngd_ctrl *ctrl;
+	struct slim_msg_txn txn;
+	struct slim_val_inf msg = {0};
+	int retries = 0;
+	u8 wbuf[8];
+	int ret = 0;
+
+	ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.ec = 0;
+	txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+	txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+	txn.la = SLIM_LA_MGR;
+	wbuf[0] = SAT_MAGIC_LSB;
+	wbuf[1] = SAT_MAGIC_MSB;
+	wbuf[2] = SAT_MSG_VER;
+	wbuf[3] = SAT_MSG_PROT;
+	txn.msg = &msg;
+	txn.msg->wbuf = wbuf;
+	txn.msg->num_bytes = 4;
+	txn.rl = 8;
+
+	dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n");
+
+capability_retry:
+	ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn);
+	if (!ret) {
+		if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+			complete(&ctrl->reconf);
+		else
+			dev_err(ctrl->dev, "unexpected state:%d\n",
+						ctrl->state);
+
+		if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+			qcom_slim_ngd_notify_slaves(ctrl);
+
+	} else if (ret == -EIO) {
+		dev_err(ctrl->dev, "capability message NACKed, retrying\n");
+		if (retries < INIT_MX_RETRIES) {
+			msleep(DEF_RETRY_MS);
+			retries++;
+			goto capability_retry;
+		}
+	} else {
+		dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret);
+	}
+}
+
+static int qcom_slim_ngd_runtime_resume(struct device *dev)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+		ret = qcom_slim_ngd_power_up(ctrl);
+	if (ret) {
+		/* Did SSR cause this power up failure */
+		if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN)
+			ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+		else
+			dev_err(ctrl->dev, "HW wakeup attempt during SSR\n");
+	} else {
+		ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE;
+	}
+
+	return 0;
+}
+
+static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
+{
+	if (enable) {
+		int ret = qcom_slim_qmi_init(ctrl, false);
+
+		if (ret) {
+			dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n",
+				ret, ctrl->state);
+			return ret;
+		}
+		/* controller state should be in sync with framework state */
+		complete(&ctrl->qmi.qmi_comp);
+		if (!pm_runtime_enabled(ctrl->dev) ||
+				!pm_runtime_suspended(ctrl->dev))
+			qcom_slim_ngd_runtime_resume(ctrl->dev);
+		else
+			pm_runtime_resume(ctrl->dev);
+		pm_runtime_mark_last_busy(ctrl->dev);
+		pm_runtime_put(ctrl->dev);
+	} else {
+		qcom_slim_qmi_exit(ctrl);
+	}
+
+	return 0;
+}
+
+static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
+					struct qmi_service *service)
+{
+	struct qcom_slim_ngd_qmi *qmi =
+		container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+	struct qcom_slim_ngd_ctrl *ctrl =
+		container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
+
+	qmi->svc_info.sq_family = AF_QIPCRTR;
+	qmi->svc_info.sq_node = service->node;
+	qmi->svc_info.sq_port = service->port;
+
+	qcom_slim_ngd_enable(ctrl, true);
+
+	return 0;
+}
+
+static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
+					 struct qmi_service *service)
+{
+	struct qcom_slim_ngd_qmi *qmi =
+		container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+
+	qmi->svc_info.sq_node = 0;
+	qmi->svc_info.sq_port = 0;
+}
+
+static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+	.new_server = qcom_slim_ngd_qmi_new_server,
+	.del_server = qcom_slim_ngd_qmi_del_server,
+};
+
+static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi;
+	int ret;
+
+	ret = qmi_handle_init(&qmi->svc_event_hdl, 0,
+				&qcom_slim_ngd_qmi_svc_event_ops, NULL);
+	if (ret < 0) {
+		dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID,
+			SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID);
+	if (ret < 0) {
+		dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret);
+		qmi_handle_release(&qmi->svc_event_hdl);
+	}
+	return ret;
+}
+
+static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi)
+{
+	qmi_handle_release(&qmi->svc_event_hdl);
+}
+
+static struct platform_driver qcom_slim_ngd_driver;
+#define QCOM_SLIM_NGD_DRV_NAME	"qcom,slim-ngd"
+
+static const struct of_device_id qcom_slim_ngd_dt_match[] = {
+	{
+		.compatible = "qcom,slim-ngd-v1.5.0",
+		.data = &ngd_v1_5_offset_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);
+
+static int of_qcom_slim_ngd_register(struct device *parent,
+				     struct qcom_slim_ngd_ctrl *ctrl)
+{
+	const struct ngd_reg_offset_data *data;
+	struct qcom_slim_ngd *ngd;
+	struct device_node *node;
+	u32 id;
+
+	data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data;
+
+	for_each_available_child_of_node(parent->of_node, node) {
+		if (of_property_read_u32(node, "reg", &id))
+			continue;
+
+		ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
+		if (!ngd)
+			return -ENOMEM;
+
+		ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
+		ngd->id = id;
+		ngd->pdev->dev.parent = parent;
+		ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+		ngd->pdev->dev.of_node = node;
+		ctrl->ngd = ngd;
+		platform_set_drvdata(ngd->pdev, ctrl);
+
+		platform_device_add(ngd->pdev);
+		ngd->base = ctrl->base + ngd->id * data->offset +
+					(ngd->id - 1) * data->size;
+		ctrl->ngd = ngd;
+		platform_driver_register(&qcom_slim_ngd_driver);
+
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static int qcom_slim_ngd_probe(struct platform_device *pdev)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	ctrl->ctrl.dev = dev;
+	ret = slim_register_controller(&ctrl->ctrl);
+	if (ret) {
+		dev_err(dev, "error adding slim controller\n");
+		return ret;
+	}
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_get_noresume(dev);
+	ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
+	if (ret) {
+		dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
+		goto err;
+	}
+
+	INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
+	ctrl->mwq = create_singlethread_workqueue("ngd_master");
+	if (!ctrl->mwq) {
+		dev_err(&pdev->dev, "Failed to start master worker\n");
+		ret = -ENOMEM;
+		goto wq_err;
+	}
+
+	return 0;
+wq_err:
+	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+err:
+	slim_unregister_controller(&ctrl->ctrl);
+	if (ctrl->mwq)
+		destroy_workqueue(ctrl->mwq);
+
+	return ret;
+}
+
+static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qcom_slim_ngd_ctrl *ctrl;
+	struct resource *res;
+	int ret;
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, ctrl);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctrl->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctrl->base))
+		return PTR_ERR(ctrl->base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+		return -ENODEV;
+	}
+
+	ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+			       IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
+	if (ret) {
+		dev_err(&pdev->dev, "request IRQ failed\n");
+		return ret;
+	}
+
+	ctrl->dev = dev;
+	ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+	ctrl->framer.superfreq =
+		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+
+	ctrl->ctrl.a_framer = &ctrl->framer;
+	ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+	ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
+	ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
+	ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
+	ctrl->ctrl.wakeup = NULL;
+	ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
+
+	spin_lock_init(&ctrl->tx_buf_lock);
+	init_completion(&ctrl->reconf);
+	init_completion(&ctrl->qmi.qmi_comp);
+
+	return of_qcom_slim_ngd_register(dev, ctrl);
+}
+
+static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
+{
+	platform_driver_unregister(&qcom_slim_ngd_driver);
+
+	return 0;
+}
+
+static int qcom_slim_ngd_remove(struct platform_device *pdev)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	slim_unregister_controller(&ctrl->ctrl);
+	qcom_slim_ngd_exit_dma(ctrl);
+	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+	if (ctrl->mwq)
+		destroy_workqueue(ctrl->mwq);
+
+	kfree(ctrl->ngd);
+	ctrl->ngd = NULL;
+	return 0;
+}
+
+static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+
+	if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE)
+		ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE;
+	pm_request_autosuspend(dev);
+	return -EAGAIN;
+}
+
+static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = qcom_slim_qmi_power_request(ctrl, false);
+	if (ret && ret != -EBUSY)
+		dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
+	if (!ret || ret == -ETIMEDOUT)
+		ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+
+	return ret;
+}
+
+static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(
+		qcom_slim_ngd_runtime_suspend,
+		qcom_slim_ngd_runtime_resume,
+		qcom_slim_ngd_runtime_idle
+	)
+};
+
+static struct platform_driver qcom_slim_ngd_ctrl_driver = {
+	.probe = qcom_slim_ngd_ctrl_probe,
+	.remove = qcom_slim_ngd_ctrl_remove,
+	.driver	= {
+		.name = "qcom,slim-ngd-ctrl",
+		.of_match_table = qcom_slim_ngd_dt_match,
+	},
+};
+
+static struct platform_driver qcom_slim_ngd_driver = {
+	.probe = qcom_slim_ngd_probe,
+	.remove = qcom_slim_ngd_remove,
+	.driver	= {
+		.name = QCOM_SLIM_NGD_DRV_NAME,
+		.pm = &qcom_slim_ngd_dev_pm_ops,
+	},
+};
+
+module_platform_driver(qcom_slim_ngd_ctrl_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMBus NGD controller");
diff --git a/drivers/slimbus/sched.c b/drivers/slimbus/sched.c
new file mode 100644
index 0000000..af84997
--- /dev/null
+++ b/drivers/slimbus/sched.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/errno.h>
+#include "slimbus.h"
+
+/**
+ * slim_ctrl_clk_pause() - Called by slimbus controller to enter/exit
+ *			   'clock pause'
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ *	isn't used when controller is to be woken up.
+ *
+ * Slimbus specification needs this sequence to turn off clocks for the bus.
+ * The sequence involves sending 3 broadcast messages (reconfiguration
+ * sequence) to inform all devices on the bus.
+ * To exit clock-pause, controller typically wakes up active framer device.
+ * This API executes clock pause reconfiguration sequence if wakeup is false.
+ * If wakeup is true, controller's wakeup is called.
+ * For entering clock-pause, -EBUSY is returned if a message transaction is pending.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+	int i, ret = 0;
+	unsigned long flags;
+	struct slim_sched *sched = &ctrl->sched;
+	struct slim_val_inf msg = {0, 0, NULL, NULL};
+
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+				3, SLIM_LA_MANAGER, &msg);
+
+	if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
+		return -EINVAL;
+
+	mutex_lock(&sched->m_reconf);
+	if (wakeup) {
+		if (sched->clk_state == SLIM_CLK_ACTIVE) {
+			mutex_unlock(&sched->m_reconf);
+			return 0;
+		}
+
+		/*
+		 * Fine-tune calculation based on clock gear,
+		 * message-bandwidth after bandwidth management
+		 */
+		ret = wait_for_completion_timeout(&sched->pause_comp,
+				msecs_to_jiffies(100));
+		if (!ret) {
+			mutex_unlock(&sched->m_reconf);
+			pr_err("Previous clock pause did not finish");
+			return -ETIMEDOUT;
+		}
+		ret = 0;
+
+		/*
+		 * Slimbus framework will call controller wakeup
+		 * Controller should make sure that it sets active framer
+		 * out of clock pause
+		 */
+		if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+			ret = ctrl->wakeup(ctrl);
+		if (!ret)
+			sched->clk_state = SLIM_CLK_ACTIVE;
+		mutex_unlock(&sched->m_reconf);
+
+		return ret;
+	}
+
+	/* already paused */
+	if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
+		mutex_unlock(&sched->m_reconf);
+		return 0;
+	}
+
+	spin_lock_irqsave(&ctrl->txn_lock, flags);
+	for (i = 0; i < SLIM_MAX_TIDS; i++) {
+		/* Pending response for a message */
+		if (idr_find(&ctrl->tid_idr, i)) {
+			spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+			mutex_unlock(&sched->m_reconf);
+			return -EBUSY;
+		}
+	}
+	spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+	sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
+
+	/* clock pause sequence */
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+	txn.rl = 4;
+	msg.num_bytes = 1;
+	msg.wbuf = &restart;
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		goto clk_pause_ret;
+
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+	txn.rl = 3;
+	msg.num_bytes = 1;
+	msg.wbuf = NULL;
+	ret = slim_do_transfer(ctrl, &txn);
+
+clk_pause_ret:
+	if (ret) {
+		sched->clk_state = SLIM_CLK_ACTIVE;
+	} else {
+		sched->clk_state = SLIM_CLK_PAUSED;
+		complete(&sched->pause_comp);
+	}
+	mutex_unlock(&sched->m_reconf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
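As a usage sketch only (the foo_* names and the drvdata layout are hypothetical, not part of this patch), a controller driver would typically drive clock pause from its runtime-PM callbacks roughly like this:

#include <linux/pm_runtime.h>
#include "slimbus.h"

/* Hypothetical controller glue, for illustration only */
static int foo_slim_runtime_suspend(struct device *dev)
{
	struct slim_controller *ctrl = dev_get_drvdata(dev);

	/* enter clock pause; returns -EBUSY if a transaction is still pending */
	return slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
}

static int foo_slim_runtime_resume(struct device *dev)
{
	struct slim_controller *ctrl = dev_get_drvdata(dev);

	/* wake the bus back up; the restart value is ignored on wakeup */
	return slim_ctrl_clk_pause(ctrl, true, 0);
}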
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
new file mode 100644
index 0000000..4399d18
--- /dev/null
+++ b/drivers/slimbus/slimbus.h
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#ifndef _DRIVERS_SLIMBUS_H
+#define _DRIVERS_SLIMBUS_H
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slimbus.h>
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME		6144
+#define SLIM_CL_PER_SUPERFRAME_DIV8	(SLIM_CL_PER_SUPERFRAME >> 3)
+
+/* SLIMbus message types. Related to interpretation of message code. */
+#define SLIM_MSG_MT_CORE			0x0
+#define SLIM_MSG_MT_DEST_REFERRED_USER		0x2
+#define SLIM_MSG_MT_SRC_REFERRED_USER		0x6
+
+/*
+ * SLIM Broadcast header format
+ * BYTE 0: MT[7:5] RL[4:0]
+ * BYTE 1: RSVD[7] MC[6:0]
+ * BYTE 2: RSVD[7:6] DT[5:4] PI[3:0]
+ */
+#define SLIM_MSG_MT_MASK	GENMASK(2, 0)
+#define SLIM_MSG_MT_SHIFT	5
+#define SLIM_MSG_RL_MASK	GENMASK(4, 0)
+#define SLIM_MSG_RL_SHIFT	0
+#define SLIM_MSG_MC_MASK	GENMASK(6, 0)
+#define SLIM_MSG_MC_SHIFT	0
+#define SLIM_MSG_DT_MASK	GENMASK(1, 0)
+#define SLIM_MSG_DT_SHIFT	4
+
+#define SLIM_HEADER_GET_MT(b)	((b >> SLIM_MSG_MT_SHIFT) & SLIM_MSG_MT_MASK)
+#define SLIM_HEADER_GET_RL(b)	((b >> SLIM_MSG_RL_SHIFT) & SLIM_MSG_RL_MASK)
+#define SLIM_HEADER_GET_MC(b)	((b >> SLIM_MSG_MC_SHIFT) & SLIM_MSG_MC_MASK)
+#define SLIM_HEADER_GET_DT(b)	((b >> SLIM_MSG_DT_SHIFT) & SLIM_MSG_DT_MASK)
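A standalone worked example of the header layout above (the GENMASK stand-in is only there so the snippet builds outside the kernel): a first byte of 0xc9 decodes to MT = 0x6 (source-referred user) and RL = 9, and a second byte of 0x01 to MC = 0x1.

#include <stdio.h>
#include <stdint.h>

/* stand-in for the kernel's GENMASK(), for a standalone build only */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ((~0U) << (l)))

#define SLIM_MSG_MT_MASK	GENMASK(2, 0)
#define SLIM_MSG_MT_SHIFT	5
#define SLIM_MSG_RL_MASK	GENMASK(4, 0)
#define SLIM_MSG_RL_SHIFT	0
#define SLIM_MSG_MC_MASK	GENMASK(6, 0)
#define SLIM_MSG_MC_SHIFT	0

#define SLIM_HEADER_GET_MT(b)	((b >> SLIM_MSG_MT_SHIFT) & SLIM_MSG_MT_MASK)
#define SLIM_HEADER_GET_RL(b)	((b >> SLIM_MSG_RL_SHIFT) & SLIM_MSG_RL_MASK)
#define SLIM_HEADER_GET_MC(b)	((b >> SLIM_MSG_MC_SHIFT) & SLIM_MSG_MC_MASK)

int main(void)
{
	uint8_t b0 = 0xc9, b1 = 0x01;

	/* prints "MT=0x6 RL=9 MC=0x1" */
	printf("MT=0x%x RL=%u MC=0x%x\n",
	       SLIM_HEADER_GET_MT(b0), SLIM_HEADER_GET_RL(b0),
	       SLIM_HEADER_GET_MC(b1));
	return 0;
}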
+
+/* Device management messages used by this framework */
+#define SLIM_MSG_MC_REPORT_PRESENT               0x1
+#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS       0x2
+#define SLIM_MSG_MC_REPORT_ABSENT                0xF
+
+/* Data channel management messages */
+#define SLIM_MSG_MC_CONNECT_SOURCE		0x10
+#define SLIM_MSG_MC_CONNECT_SINK		0x11
+#define SLIM_MSG_MC_DISCONNECT_PORT		0x14
+#define SLIM_MSG_MC_CHANGE_CONTENT		0x18
+
+/* Clock pause Reconfiguration messages */
+#define SLIM_MSG_MC_BEGIN_RECONFIGURATION        0x40
+#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK             0x4A
+#define SLIM_MSG_MC_NEXT_DEFINE_CHANNEL          0x50
+#define SLIM_MSG_MC_NEXT_DEFINE_CONTENT          0x51
+#define SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL        0x54
+#define SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL      0x55
+#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL          0x58
+#define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
+
+/*
+ * Clock pause flag to indicate that the reconfig message
+ * corresponds to clock pause sequence
+ */
+#define SLIM_MSG_CLK_PAUSE_SEQ_FLG		(1U << 8)
+
+/* Clock pause values per SLIMbus spec */
+#define SLIM_CLK_FAST				0
+#define SLIM_CLK_CONST_PHASE			1
+#define SLIM_CLK_UNSPECIFIED			2
+
+/* Destination type Values */
+#define SLIM_MSG_DEST_LOGICALADDR	0
+#define SLIM_MSG_DEST_ENUMADDR		1
+#define	SLIM_MSG_DEST_BROADCAST		3
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_MAX_CLK_GEAR		10
+#define SLIM_MIN_CLK_GEAR		1
+#define SLIM_SLOT_LEN_BITS		4
+
+/* Indicate that the frequency of the flow and the bus frequency are locked */
+#define SLIM_CHANNEL_CONTENT_FL		BIT(7)
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME		6144
+#define SLIM_SLOTS_PER_SUPERFRAME	(SLIM_CL_PER_SUPERFRAME >> 2)
+#define SLIM_SL_PER_SUPERFRAME		(SLIM_CL_PER_SUPERFRAME >> 2)
+/* Manager's logical address is set to 0xFF per spec */
+#define SLIM_LA_MANAGER 0xFF
+
+#define SLIM_MAX_TIDS			256
+/**
+ * struct slim_framer - Represents SLIMbus framer.
+ * Every controller may have multiple framers. There is 1 active framer device
+ * responsible for clocking the bus.
+ * Manager is responsible for framer hand-over.
+ * @dev: Driver model representation of the device.
+ * @e_addr: Enumeration address of the framer.
+ * @rootfreq: Root Frequency at which the framer can run. This is maximum
+ *	frequency ('clock gear 10') at which the bus can operate.
+ * @superfreq: Superframes per root frequency. Every frame is 6144 bits.
+ */
+struct slim_framer {
+	struct device		dev;
+	struct slim_eaddr	e_addr;
+	int			rootfreq;
+	int			superfreq;
+};
+
+#define to_slim_framer(d) container_of(d, struct slim_framer, dev)
+
+/**
+ * struct slim_msg_txn - Message to be sent by the controller.
+ *			This structure has packet header,
+ *			payload and buffer to be filled (if any)
+ * @rl: Header field. remaining length.
+ * @mt: Header field. Message type.
+ * @mc: Header field. LSB is message code for type mt.
+ * @dt: Header field. Destination type.
+ * @ec: Element code. Used for elemental access APIs.
+ * @tid: Transaction ID. Used for messages expecting response.
+ *	(relevant for message-codes involving read operation)
+ * @la: Logical address of the device this message is going to.
+ *	(Not used when destination type is broadcast.)
+ * @msg: Elemental access message to be read/written
+ * @comp: completion if read/write is synchronous, used internally
+ *	for tid based transactions.
+ */
+struct slim_msg_txn {
+	u8			rl;
+	u8			mt;
+	u8			mc;
+	u8			dt;
+	u16			ec;
+	u8			tid;
+	u8			la;
+	struct slim_val_inf	*msg;
+	struct	completion	*comp;
+};
+
+/* Frequently used message transaction structures */
+#define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+					0, la, msg, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+					0, la, msg, }
+
+#define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \
+	struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
+					0, la, msg, }
+/**
+ * enum slim_clk_state: SLIMbus controller's clock state used internally for
+ *	maintaining current clock state.
+ * @SLIM_CLK_ACTIVE: SLIMbus clock is active
+ * @SLIM_CLK_ENTERING_PAUSE: SLIMbus clock pause sequence is being sent on the
+ *	bus. If this succeeds, state changes to SLIM_CLK_PAUSED. If the
+ *	transition fails, state changes back to SLIM_CLK_ACTIVE
+ * @SLIM_CLK_PAUSED: SLIMbus controller clock has paused.
+ */
+enum slim_clk_state {
+	SLIM_CLK_ACTIVE,
+	SLIM_CLK_ENTERING_PAUSE,
+	SLIM_CLK_PAUSED,
+};
+
+/**
+ * struct slim_sched: Framework uses this structure internally for scheduling.
+ * @clk_state: Controller's clock state from enum slim_clk_state
+ * @pause_comp: Signals completion of clock pause sequence. This is useful when
+ *	client tries to call SLIMbus transaction when controller is entering
+ *	clock pause.
+ * @m_reconf: This mutex is held until current reconfiguration (data channel
+ *	scheduling, message bandwidth reservation) is done. Message APIs can
+ *	use the bus concurrently when this mutex is held since elemental access
+ *	messages can be sent on the bus when reconfiguration is in progress.
+ */
+struct slim_sched {
+	enum slim_clk_state	clk_state;
+	struct completion	pause_comp;
+	struct mutex		m_reconf;
+};
+
+/**
+ * enum slim_port_direction: SLIMbus port direction
+ *
+ * @SLIM_PORT_SINK: SLIMbus port is a sink
+ * @SLIM_PORT_SOURCE: SLIMbus port is a source
+ */
+enum slim_port_direction {
+	SLIM_PORT_SINK = 0,
+	SLIM_PORT_SOURCE,
+};
+/**
+ * enum slim_port_state: SLIMbus Port/Endpoint state machine
+ *	according to SLIMbus Spec 2.0
+ * @SLIM_PORT_DISCONNECTED: SLIMbus port is disconnected
+ *	entered from Unconfigure/configured state after
+ *	DISCONNECT_PORT or REMOVE_CHANNEL core command
+ * @SLIM_PORT_UNCONFIGURED: SLIMbus port is in unconfigured state.
+ *	entered from disconnect state after CONNECT_SOURCE/SINK core command
+ * @SLIM_PORT_CONFIGURED: SLIMbus port is in configured state.
+ *	entered from unconfigured state after DEFINE_CHANNEL, DEFINE_CONTENT
+ *	and ACTIVATE_CHANNEL core commands. Ready for data transmission.
+ */
+enum slim_port_state {
+	SLIM_PORT_DISCONNECTED = 0,
+	SLIM_PORT_UNCONFIGURED,
+	SLIM_PORT_CONFIGURED,
+};
+
+/**
+ * enum slim_channel_state: SLIMbus channel state machine used by core.
+ * @SLIM_CH_STATE_DISCONNECTED: SLIMbus channel is disconnected
+ * @SLIM_CH_STATE_ALLOCATED: SLIMbus channel is allocated
+ * @SLIM_CH_STATE_ASSOCIATED: SLIMbus channel is associated with port
+ * @SLIM_CH_STATE_DEFINED: SLIMbus channel parameters are defined
+ * @SLIM_CH_STATE_CONTENT_DEFINED: SLIMbus channel content is defined
+ * @SLIM_CH_STATE_ACTIVE: SLIMbus channel is active and ready for data
+ * @SLIM_CH_STATE_REMOVED: SLIMbus channel is inactive and removed
+ */
+enum slim_channel_state {
+	SLIM_CH_STATE_DISCONNECTED = 0,
+	SLIM_CH_STATE_ALLOCATED,
+	SLIM_CH_STATE_ASSOCIATED,
+	SLIM_CH_STATE_DEFINED,
+	SLIM_CH_STATE_CONTENT_DEFINED,
+	SLIM_CH_STATE_ACTIVE,
+	SLIM_CH_STATE_REMOVED,
+};
+
+/**
+ * enum slim_ch_data_fmt: SLIMbus channel data Type identifiers according to
+ *	Table 60 of SLIMbus Spec 1.01.01
+ * @SLIM_CH_DATA_FMT_NOT_DEFINED: Undefined
+ * @SLIM_CH_DATA_FMT_LPCM_AUDIO: LPCM audio
+ * @SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO: IEC61937 Compressed audio
+ * @SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO: Packed PDM audio
+ */
+enum slim_ch_data_fmt {
+	SLIM_CH_DATA_FMT_NOT_DEFINED = 0,
+	SLIM_CH_DATA_FMT_LPCM_AUDIO = 1,
+	SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO = 2,
+	SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO = 3,
+};
+
+/**
+ * enum slim_ch_aux_fmt: SLIMbus channel Aux Field format IDs according to
+ *	Table 63 of SLIMbus Spec 2.0
+ * @SLIM_CH_AUX_FMT_NOT_APPLICABLE: Undefined
+ * @SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958: ZCUV for tunneling IEC60958
+ * @SLIM_CH_AUX_FMT_USER_DEFINED: User defined
+ */
+enum slim_ch_aux_bit_fmt {
+	SLIM_CH_AUX_FMT_NOT_APPLICABLE = 0,
+	SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958 = 1,
+	SLIM_CH_AUX_FMT_USER_DEFINED = 0xF,
+};
+
+/**
+ * struct slim_channel  - SLIMbus channel, used for state machine
+ *
+ * @id: ID of channel
+ * @prrate: Presence rate of channel from Table 66 of SLIMbus 2.0 Specs
+ * @seg_dist: segment distribution code from Table 20 of SLIMbus 2.0 Specs
+ * @data_fmt: Data format of channel.
+ * @aux_fmt: Aux format for this channel.
+ * @state: channel state machine
+ */
+struct slim_channel {
+	int id;
+	int prrate;
+	int seg_dist;
+	enum slim_ch_data_fmt data_fmt;
+	enum slim_ch_aux_bit_fmt aux_fmt;
+	enum slim_channel_state state;
+};
+
+/**
+ * struct slim_port  - SLIMbus port
+ *
+ * @id: Port id
+ * @direction: Port direction, Source or Sink.
+ * @state: state machine of port.
+ * @ch: channel associated with this port.
+ */
+struct slim_port {
+	int id;
+	enum slim_port_direction direction;
+	enum slim_port_state state;
+	struct slim_channel ch;
+};
+
+/**
+ * enum slim_transport_protocol: SLIMbus Transport protocol list from
+ *	Table 47 of SLIMbus 2.0 specs.
+ * @SLIM_PROTO_ISO: Isochronous Protocol, no flow control as the data rate
+ *		matches the channel rate.
+ * @SLIM_PROTO_PUSH: Pushed Protocol, includes flow control, Used to carry
+ *		data whose rate	is equal to, or lower than the channel rate.
+ * @SLIM_PROTO_PULL: Pulled Protocol, similar usage as pushed protocol
+ *		but pull is a unicast.
+ * @SLIM_PROTO_LOCKED: Locked Protocol
+ * @SLIM_PROTO_ASYNC_SMPLX: Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_ASYNC_HALF_DUP: Asynchronous Protocol-Half-duplex
+ * @SLIM_PROTO_EXT_SMPLX: Extended Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_EXT_HALF_DUP: Extended Asynchronous Protocol-Half-duplex
+ */
+enum slim_transport_protocol {
+	SLIM_PROTO_ISO = 0,
+	SLIM_PROTO_PUSH,
+	SLIM_PROTO_PULL,
+	SLIM_PROTO_LOCKED,
+	SLIM_PROTO_ASYNC_SMPLX,
+	SLIM_PROTO_ASYNC_HALF_DUP,
+	SLIM_PROTO_EXT_SMPLX,
+	SLIM_PROTO_EXT_HALF_DUP,
+};
+
+/**
+ * struct slim_stream_runtime  - SLIMbus stream runtime instance
+ *
+ * @name: Name of the stream
+ * @dev: SLIM Device instance associated with this stream
+ * @direction: direction of stream
+ * @prot: Transport protocol used in this stream
+ * @rate: Data rate of samples
+ * @bps: bits per sample
+ * @ratem: rate multiplier: the data rate divided by the superframe rate
+ * @num_ports: number of ports
+ * @ports: pointer to instance of ports
+ * @node: list head for stream associated with slim device.
+ */
+struct slim_stream_runtime {
+	const char *name;
+	struct slim_device *dev;
+	int direction;
+	enum slim_transport_protocol prot;
+	unsigned int rate;
+	unsigned int bps;
+	unsigned int ratem;
+	int num_ports;
+	struct slim_port *ports;
+	struct list_head node;
+};
+
+/**
+ * struct slim_controller  - Controls every instance of SLIMbus
+ *				(similar to 'master' on SPI)
+ * @dev: Device interface to this driver
+ * @id: Board-specific number identifier for this controller/bus
+ * @name: Name for this controller
+ * @min_cg: Minimum clock gear supported by this controller (default value: 1)
+ * @max_cg: Maximum clock gear supported by this controller (default value: 10)
+ * @clkgear: Current clock gear in which this bus is running
+ * @laddr_ida: logical address id allocator
+ * @a_framer: Active framer which is clocking the bus managed by this controller
+ * @lock: Mutex protecting controller data structures
+ * @devices: Slim device list
+ * @tid_idr: tid id allocator
+ * @txn_lock: Lock to protect table of transactions
+ * @sched: scheduler structure used by the controller
+ * @xfer_msg: Transfer a message on this controller (this can be a broadcast
+ *	control/status message like data channel setup, or a unicast message
+ *	like value element read/write).
+ * @set_laddr: Setup logical address at laddr for the slave with elemental
+ *	address e_addr. Drivers implementing controller will be expected to
+ *	send unicast message to this device with its logical address.
+ * @get_laddr: It is possible that controller needs to set fixed logical
+ *	address table and get_laddr can be used in that case so that controller
+ *	can do this assignment. Use case is when the master is on the remote
+ *	processor side, which is responsible for allocating laddr.
+ * @wakeup: This function pointer implements controller-specific procedure
+ *	to wake it up from clock-pause. Framework will call this to bring
+ *	the controller out of clock pause.
+ * @enable_stream: This function pointer implements controller-specific procedure
+ *	to enable a stream.
+ * @disable_stream: This function pointer implements controller-specific procedure
+ *	to disable stream.
+ *
+ *	'Manager device' is responsible for device management, bandwidth
+ *	allocation, channel setup, and port associations per channel.
+ *	Device management means logical address assignment/removal based on
+ *	enumeration (report-present, report-absent) of a device.
+ *	Bandwidth allocation is done dynamically by the manager based on active
+ *	channels on the bus and message-bandwidth requests made by SLIMbus devices.
+ *	Based on current bandwidth usage, the manager chooses a frequency to run
+ *	the bus at (in steps of 'clock-gear', 1 through 10, each clock gear
+ *	representing twice the frequency of the previous gear).
+ *	The manager is also responsible for entering (and exiting) low-power mode
+ *	(known as 'clock pause').
+ *	The manager can hand over the framer role if there are multiple framers
+ *	on the bus and a certain use case warrants using a particular framer, to
+ *	avoid keeping the previous framer powered on.
+ *
+ *	Controller here performs duties of the manager device, and 'interface
+ *	device'. Interface device is responsible for monitoring the bus and
+ *	reporting information such as loss-of-synchronization, data
+ *	slot-collision.
+ */
+struct slim_controller {
+	struct device		*dev;
+	unsigned int		id;
+	char			name[SLIMBUS_NAME_SIZE];
+	int			min_cg;
+	int			max_cg;
+	int			clkgear;
+	struct ida		laddr_ida;
+	struct slim_framer	*a_framer;
+	struct mutex		lock;
+	struct list_head	devices;
+	struct idr		tid_idr;
+	spinlock_t		txn_lock;
+	struct slim_sched	sched;
+	int			(*xfer_msg)(struct slim_controller *ctrl,
+					    struct slim_msg_txn *tx);
+	int			(*set_laddr)(struct slim_controller *ctrl,
+					     struct slim_eaddr *ea, u8 laddr);
+	int			(*get_laddr)(struct slim_controller *ctrl,
+					     struct slim_eaddr *ea, u8 *laddr);
+	int		(*enable_stream)(struct slim_stream_runtime *rt);
+	int		(*disable_stream)(struct slim_stream_runtime *rt);
+	int			(*wakeup)(struct slim_controller *ctrl);
+};
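A minimal sketch of how a controller driver wires these callbacks before calling slim_register_controller(); the foo_* names are hypothetical and the hardware access is elided, but the qcom controllers earlier in this patch follow the same shape:

#include <linux/platform_device.h>
#include <linux/slimbus.h>
#include "slimbus.h"

/* Hypothetical minimal controller, for illustration only */
static int foo_xfer_msg(struct slim_controller *sctrl, struct slim_msg_txn *txn)
{
	/* push the assembled message onto the hardware FIFO here */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct slim_controller *sctrl;

	sctrl = devm_kzalloc(&pdev->dev, sizeof(*sctrl), GFP_KERNEL);
	if (!sctrl)
		return -ENOMEM;

	sctrl->dev = &pdev->dev;
	sctrl->clkgear = SLIM_MAX_CLK_GEAR;
	sctrl->xfer_msg = foo_xfer_msg;

	return slim_register_controller(sctrl);
}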
+
+int slim_device_report_present(struct slim_controller *ctrl,
+			       struct slim_eaddr *e_addr, u8 *laddr);
+void slim_report_absent(struct slim_device *sbdev);
+int slim_register_controller(struct slim_controller *ctrl);
+int slim_unregister_controller(struct slim_controller *ctrl);
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l);
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart);
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+
+static inline bool slim_tid_txn(u8 mt, u8 mc)
+{
+	return (mt == SLIM_MSG_MT_CORE &&
+		(mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
+		 mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
+		 mc == SLIM_MSG_MC_REQUEST_VALUE ||
+		 mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
+}
+
+static inline bool slim_ec_txn(u8 mt, u8 mc)
+{
+	return (mt == SLIM_MSG_MT_CORE &&
+		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+		  mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+		 (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+		  mc <= SLIM_MSG_MC_CHANGE_VALUE)));
+}
+#endif /* _DRIVERS_SLIMBUS_H */
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
new file mode 100644
index 0000000..2fa0532
--- /dev/null
+++ b/drivers/slimbus/stream.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/slimbus.h>
+#include <uapi/sound/asound.h>
+#include "slimbus.h"
+
+/**
+ * struct segdist_code - Segment Distributions code from
+ *	Table 20 of SLIMbus Specs Version 2.0
+ *
+ * @ratem: Channel Rate Multiplier (Segments per Superframe)
+ * @seg_interval: Number of slots between the first Slot of a Segment
+ *		and the first slot of the next consecutive Segment.
+ * @segdist_code: Segment Distribution Code SD[11:0]
+ * @seg_offset_mask: Segment offset mask in SD[11:0]
+ * @segdist_codes: List of all possible Segment Distribution codes.
+ */
+static const struct segdist_code {
+	int ratem;
+	int seg_interval;
+	int segdist_code;
+	u32 seg_offset_mask;
+
+} segdist_codes[] = {
+	{1,	1536,	0x200,	 0xdff},
+	{2,	768,	0x100,	 0xcff},
+	{4,	384,	0x080,	 0xc7f},
+	{8,	192,	0x040,	 0xc3f},
+	{16,	96,	0x020,	 0xc1f},
+	{32,	48,	0x010,	 0xc0f},
+	{64,	24,	0x008,	 0xc07},
+	{128,	12,	0x004,	 0xc03},
+	{256,	6,	0x002,	 0xc01},
+	{512,	3,	0x001,	 0xc00},
+	{3,	512,	0xe00,	 0x1ff},
+	{6,	256,	0xd00,	 0x0ff},
+	{12,	128,	0xc80,	 0x07f},
+	{24,	64,	0xc40,	 0x03f},
+	{48,	32,	0xc20,	 0x01f},
+	{96,	16,	0xc10,	 0x00f},
+	{192,	8,	0xc08,	 0x007},
+	{384,	4,	0xc04,	 0x003},
+	{768,	2,	0xc02,	 0x001},
+};
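For orientation (the 4000 superframes/second figure assumes the usual 24.576 MHz root clock, i.e. 6144 cells per superframe; that constant is not spelled out in this file): a 48 kHz stream gives a rate multiplier of 48000 / 4000 = 12, so the {12, 128, 0xc80, 0x07f} row above applies and the channel's segments repeat every 128 slots. A tiny standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int superframes_per_sec = 4000;	/* assumes 24.576 MHz root clock */
	const unsigned int rate = 48000;		/* sample rate in Hz */

	/* same computation as rt->ratem in slim_stream_prepare() below */
	printf("ratem = %u\n", rate / superframes_per_sec);	/* prints "ratem = 12" */
	return 0;
}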
+
+/*
+ * Presence Rate table for all Natural Frequencies
+ * The Presence rate of a constant bitrate stream is mean flow rate of the
+ * stream expressed in occupied Segments of that Data Channel per second.
+ * Table 66 from SLIMbus 2.0 Specs
+ *
+ * Index of the table corresponds to Presence rate code for the respective rate
+ * in the table.
+ */
+static const int slim_presence_rate_table[] = {
+	0, /* Not Indicated */
+	12000,
+	24000,
+	48000,
+	96000,
+	192000,
+	384000,
+	768000,
+	0, /* Reserved */
+	11025,
+	22050,
+	44100,
+	88200,
+	176400,
+	352800,
+	705600,
+	4000,
+	8000,
+	16000,
+	32000,
+	64000,
+	128000,
+	256000,
+	512000,
+};
+
+/*
+ * slim_stream_allocate() - Allocate a new SLIMbus Stream
+ * @dev: Slim device to be associated with
+ * @name: name of the stream
+ *
+ * This is the very first call for SLIMbus streaming; this API will allocate
+ * a new SLIMbus stream and return a valid stream runtime pointer for the
+ * client to use in subsequent stream APIs. State of stream is set to ALLOCATED.
+ *
+ * Return: valid pointer on success and error code on failure.
+ * From ASoC DPCM framework, this state is linked to startup() operation.
+ */
+struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev,
+						 const char *name)
+{
+	struct slim_stream_runtime *rt;
+
+	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+	if (!rt)
+		return ERR_PTR(-ENOMEM);
+
+	rt->name = kasprintf(GFP_KERNEL, "slim-%s", name);
+	if (!rt->name) {
+		kfree(rt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rt->dev = dev;
+	spin_lock(&dev->stream_list_lock);
+	list_add_tail(&rt->node, &dev->stream_list);
+	spin_unlock(&dev->stream_list_lock);
+
+	return rt;
+}
+EXPORT_SYMBOL_GPL(slim_stream_allocate);
+
+static int slim_connect_port_channel(struct slim_stream_runtime *stream,
+				     struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[2];
+	struct slim_val_inf msg = {0, 2, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_CONNECT_SOURCE;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 6, stream->dev->laddr, &msg);
+
+	if (port->direction == SLIM_PORT_SINK)
+		txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+
+	wbuf[0] = port->id;
+	wbuf[1] = port->ch.id;
+	port->ch.state = SLIM_CH_STATE_ASSOCIATED;
+	port->state = SLIM_PORT_UNCONFIGURED;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_disconnect_port(struct slim_stream_runtime *stream,
+				struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[1];
+	struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_DISCONNECT_PORT;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+	wbuf[0] = port->id;
+	port->ch.state = SLIM_CH_STATE_DISCONNECTED;
+	port->state = SLIM_PORT_DISCONNECTED;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_deactivate_remove_channel(struct slim_stream_runtime *stream,
+					  struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[1];
+	struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+	int ret;
+
+	wbuf[0] = port->ch.id;
+	ret = slim_do_transfer(sdev->ctrl, &txn);
+	if (ret)
+		return ret;
+
+	txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+	port->ch.state = SLIM_CH_STATE_REMOVED;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_prate_code(int rate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(slim_presence_rate_table); i++) {
+		if (rate == slim_presence_rate_table[i])
+			return i;
+	}
+
+	return -EINVAL;
+}
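A standalone sanity check of the lookup above (table values copied from slim_presence_rate_table; 48000 Hz maps to presence-rate code 3 and 16000 Hz to code 18):

#include <stdio.h>

static const int prate_table[] = {
	0, 12000, 24000, 48000, 96000, 192000, 384000, 768000,
	0, 11025, 22050, 44100, 88200, 176400, 352800, 705600,
	4000, 8000, 16000, 32000, 64000, 128000, 256000, 512000,
};

static int get_prate_code(int rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(prate_table) / sizeof(prate_table[0]); i++)
		if (rate == prate_table[i])
			return i;
	return -1;	/* not a SLIMbus natural frequency */
}

int main(void)
{
	/* prints "3 18" */
	printf("%d %d\n", get_prate_code(48000), get_prate_code(16000));
	return 0;
}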
+
+/*
+ * slim_stream_prepare() - Prepare a SLIMbus Stream
+ *
+ * @rt: instance of slim stream runtime to configure
+ * @cfg: new configuration for the stream
+ *
+ * This API will configure a SLIMbus stream with config parameters from cfg.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to hw_params() operation.
+ */
+int slim_stream_prepare(struct slim_stream_runtime *rt,
+			struct slim_stream_config *cfg)
+{
+	struct slim_controller *ctrl = rt->dev->ctrl;
+	struct slim_port *port;
+	int num_ports, i, port_id;
+
+	if (rt->ports) {
+		dev_err(&rt->dev->dev, "Stream already Prepared\n");
+		return -EINVAL;
+	}
+
+	num_ports = hweight32(cfg->port_mask);
+	rt->ports = kcalloc(num_ports, sizeof(*port), GFP_KERNEL);
+	if (!rt->ports)
+		return -ENOMEM;
+
+	rt->num_ports = num_ports;
+	rt->rate = cfg->rate;
+	rt->bps = cfg->bps;
+	rt->direction = cfg->direction;
+
+	if (cfg->rate % ctrl->a_framer->superfreq) {
+		/*
+		 * data rate is not an exact multiple of the superframe rate,
+		 * use PUSH/PULL protocol
+		 */
+		if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+			rt->prot = SLIM_PROTO_PUSH;
+		else
+			rt->prot = SLIM_PROTO_PULL;
+	} else {
+		rt->prot = SLIM_PROTO_ISO;
+	}
+
+	rt->ratem = cfg->rate/ctrl->a_framer->superfreq;
+
+	i = 0;
+	for_each_set_bit(port_id, &cfg->port_mask, SLIM_DEVICE_MAX_PORTS) {
+		port = &rt->ports[i];
+		port->state = SLIM_PORT_DISCONNECTED;
+		port->id = port_id;
+		port->ch.prrate = slim_get_prate_code(cfg->rate);
+		port->ch.id = cfg->chs[i];
+		port->ch.data_fmt = SLIM_CH_DATA_FMT_NOT_DEFINED;
+		port->ch.aux_fmt = SLIM_CH_AUX_FMT_NOT_APPLICABLE;
+		port->ch.state = SLIM_CH_STATE_ALLOCATED;
+
+		if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+			port->direction = SLIM_PORT_SINK;
+		else
+			port->direction = SLIM_PORT_SOURCE;
+
+		slim_connect_port_channel(rt, port);
+		i++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_prepare);
+
+static int slim_define_channel_content(struct slim_stream_runtime *stream,
+				       struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[4];
+	struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+	wbuf[0] = port->ch.id;
+	wbuf[1] = port->ch.prrate;
+
+	/* Frequency Locked for ISO Protocol */
+	if (stream->prot != SLIM_PROTO_ISO)
+		wbuf[1] |= SLIM_CHANNEL_CONTENT_FL;
+
+	wbuf[2] = port->ch.data_fmt | (port->ch.aux_fmt << 4);
+	wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+	port->ch.state = SLIM_CH_STATE_CONTENT_DEFINED;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_segdist_code(int ratem)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(segdist_codes); i++) {
+		if (segdist_codes[i].ratem == ratem)
+			return segdist_codes[i].segdist_code;
+	}
+
+	return -EINVAL;
+}
+
+static int slim_define_channel(struct slim_stream_runtime *stream,
+				       struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[4];
+	struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+	port->ch.seg_dist = slim_get_segdist_code(stream->ratem);
+
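+	/*
+	 * NEXT_DEFINE_CHANNEL payload as built below: byte 0 carries the
+	 * channel number, byte 1 the low 8 bits of the segment distribution,
+	 * byte 2 the transport protocol in the high nibble together with the
+	 * remaining segment distribution bits, and byte 3 the segment length
+	 * in slots (one extra slot for the non-ISO protocols).
+	 */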
+	wbuf[0] = port->ch.id;
+	wbuf[1] = port->ch.seg_dist & 0xFF;
+	wbuf[2] = (stream->prot << 4) | ((port->ch.seg_dist & 0xF00) >> 8);
+	if (stream->prot == SLIM_PROTO_ISO)
+		wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+	else
+		wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS + 1;
+
+	port->ch.state = SLIM_CH_STATE_DEFINED;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_activate_channel(struct slim_stream_runtime *stream,
+				 struct slim_port *port)
+{
+	struct slim_device *sdev = stream->dev;
+	u8 wbuf[1];
+	struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+	u8 mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+	DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+	txn.msg->num_bytes = 1;
+	txn.msg->wbuf = wbuf;
+	wbuf[0] = port->ch.id;
+	port->ch.state = SLIM_CH_STATE_ACTIVE;
+
+	return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+/**
+ * slim_stream_enable() - Enable a prepared SLIMbus Stream
+ * @stream: instance of slim stream runtime to enable
+ *
+ * This API enables all the ports and channels associated with the SLIMbus
+ * stream. From the ASoC DPCM framework, this state is linked to the
+ * trigger() start operation.
+ *
+ * Return: zero on success and error code on failure.
+ */
+int slim_stream_enable(struct slim_stream_runtime *stream)
+{
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+				3, SLIM_LA_MANAGER, NULL);
+	struct slim_controller *ctrl = stream->dev->ctrl;
+	int ret, i;
+
+	if (ctrl->enable_stream) {
+		ret = ctrl->enable_stream(stream);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < stream->num_ports; i++)
+			stream->ports[i].ch.state = SLIM_CH_STATE_ACTIVE;
+
+		return ret;
+	}
+
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		return ret;
+
+	/* define channels first before activating them */
+	for (i = 0; i < stream->num_ports; i++) {
+		struct slim_port *port = &stream->ports[i];
+
+		slim_define_channel(stream, port);
+		slim_define_channel_content(stream, port);
+	}
+
+	for (i = 0; i < stream->num_ports; i++) {
+		struct slim_port *port = &stream->ports[i];
+
+		slim_activate_channel(stream, port);
+		port->state = SLIM_PORT_CONFIGURED;
+	}
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+	return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_enable);
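+
+/*
+ * Usage sketch (illustrative only): from an ASoC trigger() callback a
+ * prepared stream would typically be started and paused as follows; "rt"
+ * is the runtime returned by slim_stream_allocate().
+ *
+ *	switch (cmd) {
+ *	case SNDRV_PCM_TRIGGER_START:
+ *		ret = slim_stream_enable(rt);
+ *		break;
+ *	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ *		ret = slim_stream_disable(rt);
+ *		break;
+ *	}
+ */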
+
+/**
+ * slim_stream_disable() - Disable a SLIMbus Stream
+ * @stream: instance of slim stream runtime to disable
+ *
+ * This API disables all the ports and channels associated with the SLIMbus
+ * stream. From the ASoC DPCM framework, this state is linked to the
+ * trigger() pause operation.
+ *
+ * Return: zero on success and error code on failure.
+ */
+int slim_stream_disable(struct slim_stream_runtime *stream)
+{
+	DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+				3, SLIM_LA_MANAGER, NULL);
+	struct slim_controller *ctrl = stream->dev->ctrl;
+	int ret, i;
+
+	if (ctrl->disable_stream)
+		ctrl->disable_stream(stream);
+
+	ret = slim_do_transfer(ctrl, &txn);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < stream->num_ports; i++)
+		slim_deactivate_remove_channel(stream, &stream->ports[i]);
+
+	txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+	return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_disable);
+
+/**
+ * slim_stream_unprepare() - Un-prepare a SLIMbus Stream
+ * @stream: instance of slim stream runtime to unprepare
+ *
+ * This API releases all the ports and channels associated with the SLIMbus
+ * stream. From the ASoC DPCM framework, this state is linked to the
+ * trigger() stop operation.
+ *
+ * Return: zero on success and error code on failure.
+ */
+int slim_stream_unprepare(struct slim_stream_runtime *stream)
+{
+	int i;
+
+	for (i = 0; i < stream->num_ports; i++)
+		slim_disconnect_port(stream, &stream->ports[i]);
+
+	kfree(stream->ports);
+	stream->ports = NULL;
+	stream->num_ports = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_unprepare);
+
+/**
+ * slim_stream_free() - Free a SLIMbus Stream
+ * @stream: instance of slim stream runtime to free
+ *
+ * This API frees all the memory associated with the slim stream runtime;
+ * the caller must not dereference @stream after this call. From the ASoC
+ * DPCM framework, this state is linked to the shutdown() operation.
+ *
+ * Return: zero on success and error code on failure.
+ */
+int slim_stream_free(struct slim_stream_runtime *stream)
+{
+	struct slim_device *sdev = stream->dev;
+
+	spin_lock(&sdev->stream_list_lock);
+	list_del(&stream->node);
+	spin_unlock(&sdev->stream_list_lock);
+
+	kfree(stream->name);
+	kfree(stream);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_free);
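+
+/*
+ * Usage sketch (illustrative only): teardown mirrors the setup path. A codec
+ * driver would typically call slim_stream_unprepare() from hw_free() and
+ * slim_stream_free() from shutdown(); the runtime must not be dereferenced
+ * after slim_stream_free() returns.
+ *
+ *	slim_stream_unprepare(rt);
+ *	slim_stream_free(rt);
+ */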