Snapshot of drivers/dma/qcom from Linux v4.19.13.
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 0000000..a7761c4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_HIDMA_MGMT
+	tristate "Qualcomm Technologies HIDMA Management support"
+	select DMA_ENGINE
+	help
+	  Enable support for the Qualcomm Technologies HIDMA Management
+	  interface.  Each DMA device requires one management interface
+	  driver for basic initialization before the QCOM_HIDMA channel
+	  driver can start managing the channels. In a virtualized
+	  environment, the guest OS would run the QCOM_HIDMA channel
+	  driver and the host would run the QCOM_HIDMA_MGMT management
+	  driver.
+
+config QCOM_HIDMA
+	tristate "Qualcomm Technologies HIDMA Channel support"
+	select DMA_ENGINE
+	help
+	  Enable support for the Qualcomm Technologies HIDMA controller.
+	  The HIDMA controller supports optimized buffer copies
+	  (user to kernel, kernel to kernel, etc.).  It only supports the
+	  memcpy interface. The core is not intended for general-purpose
+	  slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 0000000..1ae92da
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
+hdma_mgmt-objs	 := hidma_mgmt.o hidma_mgmt_sys.o
+obj-$(CONFIG_QCOM_HIDMA) +=  hdma.o
+hdma-objs        := hidma_ll.o hidma.o hidma_dbg.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
new file mode 100644
index 0000000..1617715
--- /dev/null
+++ b/drivers/dma/qcom/bam_dma.c
@@ -0,0 +1,1477 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QCOM BAM DMA engine driver
+ *
+ * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
+ * peripherals on the MSM 8x74.  The configuration of the channels is dependent
+ * on the way they are hard wired to that specific peripheral.  The peripheral
+ * device tree entries specify the configuration of each channel.
+ *
+ * The DMA controller requires the use of external memory for storage of the
+ * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
+ * circular buffer and operations are managed according to the offset within the
+ * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
+ * are back to defaults.
+ *
+ * During DMA operations, we write descriptors to the FIFO, being careful to
+ * handle wrapping and then write the last FIFO offset to that channel's
+ * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
+ * indicates the current FIFO offset that is being processed, so there is some
+ * indication of where the hardware is currently working.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+struct bam_desc_hw {
+	__le32 addr;		/* Buffer physical address */
+	__le16 size;		/* Buffer size in bytes */
+	__le16 flags;
+};
+
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
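+/* flag bits used in the hardware descriptor (bam_desc_hw.flags) */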
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+#define DESC_FLAG_NWD BIT(12)
+#define DESC_FLAG_CMD BIT(11)
+
+struct bam_async_desc {
+	struct virt_dma_desc vd;
+
+	u32 num_desc;
+	u32 xfer_len;
+
+	/* transaction flags, EOT|EOB|NWD */
+	u16 flags;
+
+	struct bam_desc_hw *curr_desc;
+
+	/* list node for the desc in the bam_chan list of descriptors */
+	struct list_head desc_node;
+	enum dma_transfer_direction dir;
+	size_t length;
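+	/* hardware descriptors follow; the array is sized at allocation time */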
+	struct bam_desc_hw desc[0];
+};
+
+enum bam_reg {
+	BAM_CTRL,
+	BAM_REVISION,
+	BAM_NUM_PIPES,
+	BAM_DESC_CNT_TRSHLD,
+	BAM_IRQ_SRCS,
+	BAM_IRQ_SRCS_MSK,
+	BAM_IRQ_SRCS_UNMASKED,
+	BAM_IRQ_STTS,
+	BAM_IRQ_CLR,
+	BAM_IRQ_EN,
+	BAM_CNFG_BITS,
+	BAM_IRQ_SRCS_EE,
+	BAM_IRQ_SRCS_MSK_EE,
+	BAM_P_CTRL,
+	BAM_P_RST,
+	BAM_P_HALT,
+	BAM_P_IRQ_STTS,
+	BAM_P_IRQ_CLR,
+	BAM_P_IRQ_EN,
+	BAM_P_EVNT_DEST_ADDR,
+	BAM_P_EVNT_REG,
+	BAM_P_SW_OFSTS,
+	BAM_P_DATA_FIFO_ADDR,
+	BAM_P_DESC_FIFO_ADDR,
+	BAM_P_EVNT_GEN_TRSHLD,
+	BAM_P_FIFO_SIZES,
+};
+
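+/*
+ * Register layout differs between BAM hardware revisions.  The address of a
+ * register is computed as base_offset + pipe_mult * pipe + evnt_mult * pipe +
+ * ee_mult * ee (see bam_addr()), using one of the tables below.
+ */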
+struct reg_offset_data {
+	u32 base_offset;
+	unsigned int pipe_mult, evnt_mult, ee_mult;
+};
+
+static const struct reg_offset_data bam_v1_3_reg_info[] = {
+	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
+	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
+	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
+	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
+	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
+	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
+	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
+	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
+	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
+	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
+	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
+	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
+	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
+	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
+	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
+	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
+	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
+	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
+	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
+};
+
+static const struct reg_offset_data bam_v1_4_reg_info[] = {
+	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
+	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
+	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
+	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
+	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
+	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
+	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
+	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
+	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
+	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
+	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
+	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
+	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
+	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
+};
+
+static const struct reg_offset_data bam_v1_7_reg_info[] = {
+	[BAM_CTRL]		= { 0x00000, 0x00, 0x00, 0x00 },
+	[BAM_REVISION]		= { 0x01000, 0x00, 0x00, 0x00 },
+	[BAM_NUM_PIPES]		= { 0x01008, 0x00, 0x00, 0x00 },
+	[BAM_DESC_CNT_TRSHLD]	= { 0x00008, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS]		= { 0x03010, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_MSK]	= { 0x03014, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_UNMASKED]	= { 0x03018, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_STTS]		= { 0x00014, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_CLR]		= { 0x00018, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_EN]		= { 0x0001C, 0x00, 0x00, 0x00 },
+	[BAM_CNFG_BITS]		= { 0x0007C, 0x00, 0x00, 0x00 },
+	[BAM_IRQ_SRCS_EE]	= { 0x03000, 0x00, 0x00, 0x1000 },
+	[BAM_IRQ_SRCS_MSK_EE]	= { 0x03004, 0x00, 0x00, 0x1000 },
+	[BAM_P_CTRL]		= { 0x13000, 0x1000, 0x00, 0x00 },
+	[BAM_P_RST]		= { 0x13004, 0x1000, 0x00, 0x00 },
+	[BAM_P_HALT]		= { 0x13008, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_STTS]	= { 0x13010, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_CLR]		= { 0x13014, 0x1000, 0x00, 0x00 },
+	[BAM_P_IRQ_EN]		= { 0x13018, 0x1000, 0x00, 0x00 },
+	[BAM_P_EVNT_DEST_ADDR]	= { 0x1382C, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_REG]	= { 0x13818, 0x00, 0x1000, 0x00 },
+	[BAM_P_SW_OFSTS]	= { 0x13800, 0x00, 0x1000, 0x00 },
+	[BAM_P_DATA_FIFO_ADDR]	= { 0x13824, 0x00, 0x1000, 0x00 },
+	[BAM_P_DESC_FIFO_ADDR]	= { 0x1381C, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x13828, 0x00, 0x1000, 0x00 },
+	[BAM_P_FIFO_SIZES]	= { 0x13820, 0x00, 0x1000, 0x00 },
+};
+
+/* BAM CTRL */
+#define BAM_SW_RST			BIT(0)
+#define BAM_EN				BIT(1)
+#define BAM_EN_ACCUM			BIT(4)
+#define BAM_TESTBUS_SEL_SHIFT		5
+#define BAM_TESTBUS_SEL_MASK		0x3F
+#define BAM_DESC_CACHE_SEL_SHIFT	13
+#define BAM_DESC_CACHE_SEL_MASK		0x3
+#define BAM_CACHED_DESC_STORE		BIT(15)
+#define IBC_DISABLE			BIT(16)
+
+/* BAM REVISION */
+#define REVISION_SHIFT		0
+#define REVISION_MASK		0xFF
+#define NUM_EES_SHIFT		8
+#define NUM_EES_MASK		0xF
+#define CE_BUFFER_SIZE		BIT(13)
+#define AXI_ACTIVE		BIT(14)
+#define USE_VMIDMT		BIT(15)
+#define SECURED			BIT(16)
+#define BAM_HAS_NO_BYPASS	BIT(17)
+#define HIGH_FREQUENCY_BAM	BIT(18)
+#define INACTIV_TMRS_EXST	BIT(19)
+#define NUM_INACTIV_TMRS	BIT(20)
+#define DESC_CACHE_DEPTH_SHIFT	21
+#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
+#define CMD_DESC_EN		BIT(23)
+#define INACTIV_TMR_BASE_SHIFT	24
+#define INACTIV_TMR_BASE_MASK	0xFF
+
+/* BAM NUM PIPES */
+#define BAM_NUM_PIPES_SHIFT		0
+#define BAM_NUM_PIPES_MASK		0xFF
+#define PERIPH_NON_PIPE_GRP_SHIFT	16
+#define PERIPH_NON_PIP_GRP_MASK		0xFF
+#define BAM_NON_PIPE_GRP_SHIFT		24
+#define BAM_NON_PIPE_GRP_MASK		0xFF
+
+/* BAM CNFG BITS */
+#define BAM_PIPE_CNFG		BIT(2)
+#define BAM_FULL_PIPE		BIT(11)
+#define BAM_NO_EXT_P_RST	BIT(12)
+#define BAM_IBC_DISABLE		BIT(13)
+#define BAM_SB_CLK_REQ		BIT(14)
+#define BAM_PSM_CSW_REQ		BIT(15)
+#define BAM_PSM_P_RES		BIT(16)
+#define BAM_AU_P_RES		BIT(17)
+#define BAM_SI_P_RES		BIT(18)
+#define BAM_WB_P_RES		BIT(19)
+#define BAM_WB_BLK_CSW		BIT(20)
+#define BAM_WB_CSW_ACK_IDL	BIT(21)
+#define BAM_WB_RETR_SVPNT	BIT(22)
+#define BAM_WB_DSC_AVL_P_RST	BIT(23)
+#define BAM_REG_P_EN		BIT(24)
+#define BAM_PSM_P_HD_DATA	BIT(25)
+#define BAM_AU_ACCUMED		BIT(26)
+#define BAM_CMD_ENABLE		BIT(27)
+
+#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
+				 BAM_NO_EXT_P_RST |	\
+				 BAM_IBC_DISABLE |	\
+				 BAM_SB_CLK_REQ |	\
+				 BAM_PSM_CSW_REQ |	\
+				 BAM_PSM_P_RES |	\
+				 BAM_AU_P_RES |		\
+				 BAM_SI_P_RES |		\
+				 BAM_WB_P_RES |		\
+				 BAM_WB_BLK_CSW |	\
+				 BAM_WB_CSW_ACK_IDL |	\
+				 BAM_WB_RETR_SVPNT |	\
+				 BAM_WB_DSC_AVL_P_RST |	\
+				 BAM_REG_P_EN |		\
+				 BAM_PSM_P_HD_DATA |	\
+				 BAM_AU_ACCUMED |	\
+				 BAM_CMD_ENABLE)
+
+/* PIPE CTRL */
+#define P_EN			BIT(1)
+#define P_DIRECTION		BIT(3)
+#define P_SYS_STRM		BIT(4)
+#define P_SYS_MODE		BIT(5)
+#define P_AUTO_EOB		BIT(6)
+#define P_AUTO_EOB_SEL_SHIFT	7
+#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
+#define P_PREFETCH_LIMIT_SHIFT	9
+#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
+#define P_WRITE_NWD		BIT(11)
+#define P_LOCK_GROUP_SHIFT	16
+#define P_LOCK_GROUP_MASK	0x1F
+
+/* BAM_DESC_CNT_TRSHLD */
+#define CNT_TRSHLD		0xffff
+#define DEFAULT_CNT_THRSHLD	0x4
+
+/* BAM_IRQ_SRCS */
+#define BAM_IRQ			BIT(31)
+#define P_IRQ			0x7fffffff
+
+/* BAM_IRQ_SRCS_MSK */
+#define BAM_IRQ_MSK		BAM_IRQ
+#define P_IRQ_MSK		P_IRQ
+
+/* BAM_IRQ_STTS */
+#define BAM_TIMER_IRQ		BIT(4)
+#define BAM_EMPTY_IRQ		BIT(3)
+#define BAM_ERROR_IRQ		BIT(2)
+#define BAM_HRESP_ERR_IRQ	BIT(1)
+
+/* BAM_IRQ_CLR */
+#define BAM_TIMER_CLR		BIT(4)
+#define BAM_EMPTY_CLR		BIT(3)
+#define BAM_ERROR_CLR		BIT(2)
+#define BAM_HRESP_ERR_CLR	BIT(1)
+
+/* BAM_IRQ_EN */
+#define BAM_TIMER_EN		BIT(4)
+#define BAM_EMPTY_EN		BIT(3)
+#define BAM_ERROR_EN		BIT(2)
+#define BAM_HRESP_ERR_EN	BIT(1)
+
+/* BAM_P_IRQ_EN */
+#define P_PRCSD_DESC_EN		BIT(0)
+#define P_TIMER_EN		BIT(1)
+#define P_WAKE_EN		BIT(2)
+#define P_OUT_OF_DESC_EN	BIT(3)
+#define P_ERR_EN		BIT(4)
+#define P_TRNSFR_END_EN		BIT(5)
+#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
+
+/* BAM_P_SW_OFSTS */
+#define P_SW_OFSTS_MASK		0xffff
+
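+/*
+ * Each hardware descriptor is 8 bytes, so a 32K FIFO holds 4096 slots.  One
+ * slot is kept unused so that the head/tail bookkeeping (CIRC_SPACE/CIRC_CNT
+ * over a ring of MAX_DESCRIPTORS + 1) can tell a full FIFO from an empty one.
+ */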
+#define BAM_DESC_FIFO_SIZE	SZ_32K
+#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+#define BAM_FIFO_SIZE	(SZ_32K - 8)
+#define IS_BUSY(chan)	(CIRC_SPACE((chan)->tail, (chan)->head,\
+			 MAX_DESCRIPTORS + 1) == 0)
+
+struct bam_chan {
+	struct virt_dma_chan vc;
+
+	struct bam_device *bdev;
+
+	/* configuration from device tree */
+	u32 id;
+
+	/* runtime configuration */
+	struct dma_slave_config slave;
+
+	/* fifo storage */
+	struct bam_desc_hw *fifo_virt;
+	dma_addr_t fifo_phys;
+
+	/* fifo markers */
+	unsigned short head;		/* start of active descriptor entries */
+	unsigned short tail;		/* end of active descriptor entries */
+
+	unsigned int initialized;	/* is the channel hw initialized? */
+	unsigned int paused;		/* is the channel paused? */
+	unsigned int reconfigure;	/* new slave config? */
+	/* list of descriptors currently processed */
+	struct list_head desc_list;
+
+	struct list_head node;
+};
+
+static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
+{
+	return container_of(common, struct bam_chan, vc.chan);
+}
+
+struct bam_device {
+	void __iomem *regs;
+	struct device *dev;
+	struct dma_device common;
+	struct device_dma_parameters dma_parms;
+	struct bam_chan *channels;
+	u32 num_channels;
+	u32 num_ees;
+
+	/* execution environment ID, from DT */
+	u32 ee;
+	bool controlled_remotely;
+
+	const struct reg_offset_data *layout;
+
+	struct clk *bamclk;
+	int irq;
+
+	/* dma start transaction tasklet */
+	struct tasklet_struct task;
+};
+
+/**
+ * bam_addr - returns BAM register address
+ * @bdev: bam device
+ * @pipe: pipe instance (ignored when register doesn't have multiple instances)
+ * @reg:  register enum
+ */
+static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
+		enum bam_reg reg)
+{
+	const struct reg_offset_data r = bdev->layout[reg];
+
+	return bdev->regs + r.base_offset +
+		r.pipe_mult * pipe +
+		r.evnt_mult * pipe +
+		r.ee_mult * bdev->ee;
+}
+
+/**
+ * bam_reset_channel - Reset individual BAM DMA channel
+ * @bchan: bam channel
+ *
+ * This function resets a specific BAM channel
+ */
+static void bam_reset_channel(struct bam_chan *bchan)
+{
+	struct bam_device *bdev = bchan->bdev;
+
+	lockdep_assert_held(&bchan->vc.lock);
+
+	/* reset channel */
+	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
+	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
+
+	/* don't allow cpu to reorder BAM register accesses done after this */
+	wmb();
+
+	/* make sure hw is initialized when channel is used the first time  */
+	bchan->initialized = 0;
+}
+
+/**
+ * bam_chan_init_hw - Initialize channel hardware
+ * @bchan: bam channel
+ * @dir: DMA transfer direction
+ *
+ * This function resets and initializes the BAM channel
+ */
+static void bam_chan_init_hw(struct bam_chan *bchan,
+	enum dma_transfer_direction dir)
+{
+	struct bam_device *bdev = bchan->bdev;
+	u32 val;
+
+	/* Reset the channel to clear internal state of the FIFO */
+	bam_reset_channel(bchan);
+
+	/*
+	 * write out 8 byte aligned address.  We have enough space for this
+	 * because we allocated 1 more descriptor (8 bytes) than we can use
+	 */
+	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
+			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
+	writel_relaxed(BAM_FIFO_SIZE,
+			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
+
+	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
+	writel_relaxed(P_DEFAULT_IRQS_EN,
+			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+	/* unmask the specific pipe and EE combo */
+	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+	val |= BIT(bchan->id);
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+
+	/* don't allow cpu to reorder the channel enable done below */
+	wmb();
+
+	/* set fixed direction and mode, then enable channel */
+	val = P_EN | P_SYS_MODE;
+	if (dir == DMA_DEV_TO_MEM)
+		val |= P_DIRECTION;
+
+	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
+
+	bchan->initialized = 1;
+
+	/* init FIFO pointers */
+	bchan->head = 0;
+	bchan->tail = 0;
+}
+
+/**
+ * bam_alloc_chan - Allocate channel resources for DMA channel.
+ * @chan: specified channel
+ *
+ * This function allocates the FIFO descriptor memory
+ */
+static int bam_alloc_chan(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+
+	if (bchan->fifo_virt)
+		return 0;
+
+	/* allocate FIFO descriptor space, but only if necessary */
+	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+					&bchan->fifo_phys, GFP_KERNEL);
+
+	if (!bchan->fifo_virt) {
+		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
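+/*
+ * Runtime PM is left disabled when the BAM is controlled remotely (see
+ * bam_dma_probe()), so only take a reference when it is actually enabled.
+ */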
+static int bam_pm_runtime_get_sync(struct device *dev)
+{
+	if (pm_runtime_enabled(dev))
+		return pm_runtime_get_sync(dev);
+
+	return 0;
+}
+
+/**
+ * bam_free_chan - Frees dma resources associated with specific channel
+ * @chan: specified channel
+ *
+ * Free the allocated fifo descriptor memory and channel resources
+ *
+ */
+static void bam_free_chan(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	u32 val;
+	unsigned long flags;
+	int ret;
+
+	ret = bam_pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
+
+	vchan_free_chan_resources(to_virt_chan(chan));
+
+	if (!list_empty(&bchan->desc_list)) {
+		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+		goto err;
+	}
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+	bam_reset_channel(bchan);
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+		    bchan->fifo_phys);
+	bchan->fifo_virt = NULL;
+
+	/* mask irq for pipe/channel */
+	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+	val &= ~BIT(bchan->id);
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+
+	/* disable irq */
+	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+}
+
+/**
+ * bam_slave_config - set slave configuration for channel
+ * @chan: dma channel
+ * @cfg: slave configuration
+ *
+ * Sets slave configuration for channel
+ *
+ */
+static int bam_slave_config(struct dma_chan *chan,
+			    struct dma_slave_config *cfg)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	unsigned long flag;
+
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	memcpy(&bchan->slave, cfg, sizeof(*cfg));
+	bchan->reconfigure = 1;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+	return 0;
+}
+
+/**
+ * bam_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+	struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc;
+	struct scatterlist *sg;
+	u32 i;
+	struct bam_desc_hw *desc;
+	unsigned int num_alloc = 0;
+
+
+	if (!is_slave_direction(direction)) {
+		dev_err(bdev->dev, "invalid dma direction\n");
+		return NULL;
+	}
+
+	/* calculate number of required entries */
+	for_each_sg(sgl, sg, sg_len, i)
+		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
+
+	/* allocate enough room to accommodate the number of entries */
+	async_desc = kzalloc(sizeof(*async_desc) +
+			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+	if (!async_desc)
+		goto err_out;
+
+	if (flags & DMA_PREP_FENCE)
+		async_desc->flags |= DESC_FLAG_NWD;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		async_desc->flags |= DESC_FLAG_EOT;
+
+	async_desc->num_desc = num_alloc;
+	async_desc->curr_desc = async_desc->desc;
+	async_desc->dir = direction;
+
+	/* fill in temporary descriptors */
+	desc = async_desc->desc;
+	for_each_sg(sgl, sg, sg_len, i) {
+		unsigned int remainder = sg_dma_len(sg);
+		unsigned int curr_offset = 0;
+
+		do {
+			if (flags & DMA_PREP_CMD)
+				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
+
+			desc->addr = cpu_to_le32(sg_dma_address(sg) +
+						 curr_offset);
+
+			if (remainder > BAM_FIFO_SIZE) {
+				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+				remainder -= BAM_FIFO_SIZE;
+				curr_offset += BAM_FIFO_SIZE;
+			} else {
+				desc->size = cpu_to_le16(remainder);
+				remainder = 0;
+			}
+
+			async_desc->length += le16_to_cpu(desc->size);
+			desc++;
+		} while (remainder > 0);
+	}
+
+	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+	kfree(async_desc);
+	return NULL;
+}
+
+/**
+ * bam_dma_terminate_all - terminate all transactions on a channel
+ * @chan: bam dma channel
+ *
+ * Dequeues and frees all transactions; no completion callbacks are invoked.
+ *
+ */
+static int bam_dma_terminate_all(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_async_desc *async_desc, *tmp;
+	unsigned long flag;
+	LIST_HEAD(head);
+
+	/* remove all transactions, including active transaction */
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	list_for_each_entry_safe(async_desc, tmp,
+				 &bchan->desc_list, desc_node) {
+		list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+		list_del(&async_desc->desc_node);
+	}
+
+	vchan_get_all_descriptors(&bchan->vc, &head);
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+	vchan_dma_desc_free_list(&bchan->vc, &head);
+
+	return 0;
+}
+
+/**
+ * bam_pause - Pause DMA channel
+ * @chan: dma channel
+ *
+ */
+static int bam_pause(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	unsigned long flag;
+	int ret;
+
+	ret = bam_pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
+	bchan->paused = 1;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
+	return 0;
+}
+
+/**
+ * bam_resume - Resume DMA channel operations
+ * @chan: dma channel
+ *
+ */
+static int bam_resume(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	unsigned long flag;
+	int ret;
+
+	ret = bam_pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&bchan->vc.lock, flag);
+	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
+	bchan->paused = 0;
+	spin_unlock_irqrestore(&bchan->vc.lock, flag);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
+	return 0;
+}
+
+/**
+ * process_channel_irqs - processes the channel interrupts
+ * @bdev: bam controller
+ *
+ * This function processes the channel interrupts
+ *
+ */
+static u32 process_channel_irqs(struct bam_device *bdev)
+{
+	u32 i, srcs, pipe_stts, offset, avail;
+	unsigned long flags;
+	struct bam_async_desc *async_desc, *tmp;
+
+	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
+
+	/* return early if no pipe/channel interrupts are present */
+	if (!(srcs & P_IRQ))
+		return srcs;
+
+	for (i = 0; i < bdev->num_channels; i++) {
+		struct bam_chan *bchan = &bdev->channels[i];
+
+		if (!(srcs & BIT(i)))
+			continue;
+
+		/* clear pipe irq */
+		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
+
+		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
+
+		spin_lock_irqsave(&bchan->vc.lock, flags);
+
+		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
+				       P_SW_OFSTS_MASK;
+		offset /= sizeof(struct bam_desc_hw);
+
+		/* Number of processed descriptors available to reap */
+		avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+
+		list_for_each_entry_safe(async_desc, tmp,
+					 &bchan->desc_list, desc_node) {
+			/* Not enough data to read */
+			if (avail < async_desc->xfer_len)
+				break;
+
+			/* manage FIFO */
+			bchan->head += async_desc->xfer_len;
+			bchan->head %= MAX_DESCRIPTORS;
+
+			async_desc->num_desc -= async_desc->xfer_len;
+			async_desc->curr_desc += async_desc->xfer_len;
+			avail -= async_desc->xfer_len;
+
+			/*
+			 * if complete, process cookie. Otherwise
+			 * push back to front of desc_issued so that
+			 * it gets restarted by the tasklet
+			 */
+			if (!async_desc->num_desc) {
+				vchan_cookie_complete(&async_desc->vd);
+			} else {
+				list_add(&async_desc->vd.node,
+					 &bchan->vc.desc_issued);
+			}
+			list_del(&async_desc->desc_node);
+		}
+
+		spin_unlock_irqrestore(&bchan->vc.lock, flags);
+	}
+
+	return srcs;
+}
+
+/**
+ * bam_dma_irq - irq handler for bam controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the bam controller
+ */
+static irqreturn_t bam_dma_irq(int irq, void *data)
+{
+	struct bam_device *bdev = data;
+	u32 clr_mask = 0, srcs = 0;
+	int ret;
+
+	srcs |= process_channel_irqs(bdev);
+
+	/* kick off tasklet to start next dma transfer */
+	if (srcs & P_IRQ)
+		tasklet_schedule(&bdev->task);
+
+	ret = bam_pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (srcs & BAM_IRQ) {
+		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
+
+		/*
+		 * don't allow reorder of the various accesses to the BAM
+		 * registers
+		 */
+		mb();
+
+		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+	}
+
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * bam_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_async_desc *async_desc;
+	struct virt_dma_desc *vd;
+	int ret;
+	size_t residue = 0;
+	unsigned int i;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (!txstate)
+		return bchan->paused ? DMA_PAUSED : ret;
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+	vd = vchan_find_desc(&bchan->vc, cookie);
+	if (vd) {
+		residue = container_of(vd, struct bam_async_desc, vd)->length;
+	} else {
+		list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+			if (async_desc->vd.tx.cookie != cookie)
+				continue;
+
+			for (i = 0; i < async_desc->num_desc; i++)
+				residue += le16_to_cpu(
+						async_desc->curr_desc[i].size);
+		}
+	}
+
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+	dma_set_residue(txstate, residue);
+
+	if (ret == DMA_IN_PROGRESS && bchan->paused)
+		ret = DMA_PAUSED;
+
+	return ret;
+}
+
+/**
+ * bam_apply_new_config
+ * @bchan: bam dma channel
+ * @dir: DMA direction
+ */
+static void bam_apply_new_config(struct bam_chan *bchan,
+	enum dma_transfer_direction dir)
+{
+	struct bam_device *bdev = bchan->bdev;
+	u32 maxburst;
+
+	if (!bdev->controlled_remotely) {
+		if (dir == DMA_DEV_TO_MEM)
+			maxburst = bchan->slave.src_maxburst;
+		else
+			maxburst = bchan->slave.dst_maxburst;
+
+		writel_relaxed(maxburst,
+			       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+	}
+
+	bchan->reconfigure = 0;
+}
+
+/**
+ * bam_start_dma - start next transaction
+ * @bchan: bam dma channel
+ */
+static void bam_start_dma(struct bam_chan *bchan)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc = NULL;
+	struct bam_desc_hw *desc;
+	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+					sizeof(struct bam_desc_hw));
+	int ret;
+	unsigned int avail;
+	struct dmaengine_desc_callback cb;
+
+	lockdep_assert_held(&bchan->vc.lock);
+
+	if (!vd)
+		return;
+
+	ret = bam_pm_runtime_get_sync(bdev->dev);
+	if (ret < 0)
+		return;
+
+	while (vd && !IS_BUSY(bchan)) {
+		list_del(&vd->node);
+
+		async_desc = container_of(vd, struct bam_async_desc, vd);
+
+		/* on first use, initialize the channel hardware */
+		if (!bchan->initialized)
+			bam_chan_init_hw(bchan, async_desc->dir);
+
+		/* apply new slave config changes, if necessary */
+		if (bchan->reconfigure)
+			bam_apply_new_config(bchan, async_desc->dir);
+
+		desc = async_desc->curr_desc;
+		avail = CIRC_SPACE(bchan->tail, bchan->head,
+				   MAX_DESCRIPTORS + 1);
+
+		if (async_desc->num_desc > avail)
+			async_desc->xfer_len = avail;
+		else
+			async_desc->xfer_len = async_desc->num_desc;
+
+		/* set any special flags on the last descriptor */
+		if (async_desc->num_desc == async_desc->xfer_len)
+			desc[async_desc->xfer_len - 1].flags |=
+						cpu_to_le16(async_desc->flags);
+
+		vd = vchan_next_desc(&bchan->vc);
+
+		dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
+
+		/*
+		 * An interrupt is generated at this desc if:
+		 *  - the FIFO is full,
+		 *  - there are no more descriptors to add, or
+		 *  - a completion callback was requested for this desc.
+		 *    In that case, BAM delivers the completion callback
+		 *    for this desc and continues processing the next desc.
+		 */
+		if (((avail <= async_desc->xfer_len) || !vd ||
+		     dmaengine_desc_callback_valid(&cb)) &&
+		    !(async_desc->flags & DESC_FLAG_EOT))
+			desc[async_desc->xfer_len - 1].flags |=
+				cpu_to_le16(DESC_FLAG_INT);
+
+		if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+			u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+			memcpy(&fifo[bchan->tail], desc,
+			       partial * sizeof(struct bam_desc_hw));
+			memcpy(fifo, &desc[partial],
+			       (async_desc->xfer_len - partial) *
+				sizeof(struct bam_desc_hw));
+		} else {
+			memcpy(&fifo[bchan->tail], desc,
+			       async_desc->xfer_len *
+			       sizeof(struct bam_desc_hw));
+		}
+
+		bchan->tail += async_desc->xfer_len;
+		bchan->tail %= MAX_DESCRIPTORS;
+		list_add_tail(&async_desc->desc_node, &bchan->desc_list);
+	}
+
+	/* ensure descriptor writes and dma start not reordered */
+	wmb();
+	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
+}
+
+/**
+ * dma_tasklet - DMA IRQ tasklet
+ * @data: tasklet argument (bam controller structure)
+ *
+ * Walks all channels and starts any pending transactions on those that are idle
+ */
+static void dma_tasklet(unsigned long data)
+{
+	struct bam_device *bdev = (struct bam_device *)data;
+	struct bam_chan *bchan;
+	unsigned long flags;
+	unsigned int i;
+
+	/* go through the channels and kick off transactions */
+	for (i = 0; i < bdev->num_channels; i++) {
+		bchan = &bdev->channels[i];
+		spin_lock_irqsave(&bchan->vc.lock, flags);
+
+		if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
+			bam_start_dma(bchan);
+		spin_unlock_irqrestore(&bchan->vc.lock, flags);
+	}
+
+}
+
+/**
+ * bam_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Starts any pending transactions if the channel's descriptor FIFO is not busy
+ */
+static void bam_issue_pending(struct dma_chan *chan)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&bchan->vc.lock, flags);
+
+	/* if work pending and idle, start a transaction */
+	if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
+		bam_start_dma(bchan);
+
+	spin_unlock_irqrestore(&bchan->vc.lock, flags);
+}
+
+/**
+ * bam_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void bam_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct bam_async_desc *async_desc = container_of(vd,
+			struct bam_async_desc, vd);
+
+	kfree(async_desc);
+}
+
+static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *of)
+{
+	struct bam_device *bdev = container_of(of->of_dma_data,
+					struct bam_device, common);
+	unsigned int request;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	request = dma_spec->args[0];
+	if (request >= bdev->num_channels)
+		return NULL;
+
+	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
+}
+
+/**
+ * bam_init
+ * @bdev: bam device
+ *
+ * Initialization helper for global bam registers
+ */
+static int bam_init(struct bam_device *bdev)
+{
+	u32 val;
+
+	/* read revision and configuration information */
+	if (!bdev->num_ees) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
+	}
+
+	/* check that configured EE is within range */
+	if (bdev->ee >= bdev->num_ees)
+		return -EINVAL;
+
+	if (!bdev->num_channels) {
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
+		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+	}
+
+	if (bdev->controlled_remotely)
+		return 0;
+
+	/* s/w reset bam */
+	/* after reset all pipes are disabled and idle */
+	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
+	val |= BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+	val &= ~BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* make sure previous stores are visible before enabling BAM */
+	wmb();
+
+	/* enable bam */
+	val |= BAM_EN;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* set descriptor threshold, start with 4 bytes */
+	writel_relaxed(DEFAULT_CNT_THRSHLD,
+			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+
+	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
+	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
+
+	/* enable irqs for errors */
+	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+			bam_addr(bdev, 0, BAM_IRQ_EN));
+
+	/* unmask global bam interrupt */
+	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+
+	return 0;
+}
+
+static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
+	u32 index)
+{
+	bchan->id = index;
+	bchan->bdev = bdev;
+
+	vchan_init(&bchan->vc, &bdev->common);
+	bchan->vc.desc_free = bam_dma_free_desc;
+	INIT_LIST_HEAD(&bchan->desc_list);
+}
+
+static const struct of_device_id bam_of_match[] = {
+	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
+	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
+	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
+static int bam_dma_probe(struct platform_device *pdev)
+{
+	struct bam_device *bdev;
+	const struct of_device_id *match;
+	struct resource *iores;
+	int ret, i;
+
+	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+	if (!bdev)
+		return -ENOMEM;
+
+	bdev->dev = &pdev->dev;
+
+	match = of_match_node(bam_of_match, pdev->dev.of_node);
+	if (!match) {
+		dev_err(&pdev->dev, "Unsupported BAM module\n");
+		return -ENODEV;
+	}
+
+	bdev->layout = match->data;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(bdev->regs))
+		return PTR_ERR(bdev->regs);
+
+	bdev->irq = platform_get_irq(pdev, 0);
+	if (bdev->irq < 0)
+		return bdev->irq;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
+	if (ret) {
+		dev_err(bdev->dev, "Execution environment unspecified\n");
+		return ret;
+	}
+
+	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+						"qcom,controlled-remotely");
+
+	if (bdev->controlled_remotely) {
+		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+					   &bdev->num_channels);
+		if (ret)
+			dev_err(bdev->dev, "num-channels unspecified in dt\n");
+
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+					   &bdev->num_ees);
+		if (ret)
+			dev_err(bdev->dev, "num-ees unspecified in dt\n");
+	}
+
+	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+	if (IS_ERR(bdev->bamclk)) {
+		if (!bdev->controlled_remotely)
+			return PTR_ERR(bdev->bamclk);
+
+		bdev->bamclk = NULL;
+	}
+
+	ret = clk_prepare_enable(bdev->bamclk);
+	if (ret) {
+		dev_err(bdev->dev, "failed to prepare/enable clock\n");
+		return ret;
+	}
+
+	ret = bam_init(bdev);
+	if (ret)
+		goto err_disable_clk;
+
+	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+
+	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
+				sizeof(*bdev->channels), GFP_KERNEL);
+
+	if (!bdev->channels) {
+		ret = -ENOMEM;
+		goto err_tasklet_kill;
+	}
+
+	/* allocate and initialize channels */
+	INIT_LIST_HEAD(&bdev->common.channels);
+
+	for (i = 0; i < bdev->num_channels; i++)
+		bam_channel_init(bdev, &bdev->channels[i], i);
+
+	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
+			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
+	if (ret)
+		goto err_bam_channel_exit;
+
+	/* set max dma segment size */
+	bdev->common.dev = bdev->dev;
+	bdev->common.dev->dma_parms = &bdev->dma_parms;
+	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
+	if (ret) {
+		dev_err(bdev->dev, "cannot set maximum segment size\n");
+		goto err_bam_channel_exit;
+	}
+
+	platform_set_drvdata(pdev, bdev);
+
+	/* set capabilities */
+	dma_cap_zero(bdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
+
+	/* initialize dmaengine apis */
+	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+	bdev->common.device_free_chan_resources = bam_free_chan;
+	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+	bdev->common.device_config = bam_slave_config;
+	bdev->common.device_pause = bam_pause;
+	bdev->common.device_resume = bam_resume;
+	bdev->common.device_terminate_all = bam_dma_terminate_all;
+	bdev->common.device_issue_pending = bam_issue_pending;
+	bdev->common.device_tx_status = bam_tx_status;
+	bdev->common.dev = bdev->dev;
+
+	ret = dma_async_device_register(&bdev->common);
+	if (ret) {
+		dev_err(bdev->dev, "failed to register dma async device\n");
+		goto err_bam_channel_exit;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
+					&bdev->common);
+	if (ret)
+		goto err_unregister_dma;
+
+	if (bdev->controlled_remotely) {
+		pm_runtime_disable(&pdev->dev);
+		return 0;
+	}
+
+	pm_runtime_irq_safe(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&bdev->common);
+err_bam_channel_exit:
+	for (i = 0; i < bdev->num_channels; i++)
+		tasklet_kill(&bdev->channels[i].vc.task);
+err_tasklet_kill:
+	tasklet_kill(&bdev->task);
+err_disable_clk:
+	clk_disable_unprepare(bdev->bamclk);
+
+	return ret;
+}
+
+static int bam_dma_remove(struct platform_device *pdev)
+{
+	struct bam_device *bdev = platform_get_drvdata(pdev);
+	u32 i;
+
+	pm_runtime_force_suspend(&pdev->dev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&bdev->common);
+
+	/* mask all interrupts for this execution environment */
+	writel_relaxed(0, bam_addr(bdev, 0,  BAM_IRQ_SRCS_MSK_EE));
+
+	devm_free_irq(bdev->dev, bdev->irq, bdev);
+
+	for (i = 0; i < bdev->num_channels; i++) {
+		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
+		tasklet_kill(&bdev->channels[i].vc.task);
+
+		if (!bdev->channels[i].fifo_virt)
+			continue;
+
+		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+			    bdev->channels[i].fifo_virt,
+			    bdev->channels[i].fifo_phys);
+	}
+
+	tasklet_kill(&bdev->task);
+
+	clk_disable_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	clk_disable(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(bdev->bamclk);
+	if (ret < 0) {
+		dev_err(dev, "clk_enable failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+
+	if (!bdev->controlled_remotely)
+		pm_runtime_force_suspend(dev);
+
+	clk_unprepare(bdev->bamclk);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+	struct bam_device *bdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
+
+	if (!bdev->controlled_remotely)
+		pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+				NULL)
+};
+
+static struct platform_driver bam_dma_driver = {
+	.probe = bam_dma_probe,
+	.remove = bam_dma_remove,
+	.driver = {
+		.name = "bam-dma-engine",
+		.pm = &bam_dma_pm_ops,
+		.of_match_table = bam_of_match,
+	},
+};
+
+module_platform_driver(bam_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 0000000..43d4b00
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,977 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009;  for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/msi.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * Default idle time is 2 seconds. This parameter can
+ * be overridden by changing the following
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ * during kernel boot.
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
+#define HIDMA_ERR_INFO_SW			0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
+#define HIDMA_NR_DEFAULT_DESC			10
+#define HIDMA_MSI_INTS				11
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+	return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+	return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+	return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
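+/* if nonzero, overrides the default number of descriptors allocated per channel */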
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+
+enum hidma_cap {
+	HIDMA_MSI_CAP = 1,
+	HIDMA_IDENTITY_CAP,
+};
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+	struct dma_device *ddev = mchan->chan.device;
+	struct hidma_dev *mdma = to_hidma_dev(ddev);
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t last_cookie;
+	struct hidma_desc *mdesc;
+	struct hidma_desc *next;
+	unsigned long irqflags;
+	struct list_head list;
+
+	INIT_LIST_HEAD(&list);
+
+	/* Get all completed descriptors */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_tail_init(&mchan->completed, &list);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	/* Execute callbacks and run dependencies */
+	list_for_each_entry_safe(mdesc, next, &list, node) {
+		enum dma_status llstat;
+		struct dmaengine_desc_callback cb;
+		struct dmaengine_result result;
+
+		desc = &mdesc->desc;
+		last_cookie = desc->cookie;
+
+		spin_lock_irqsave(&mchan->lock, irqflags);
+		dma_cookie_complete(desc);
+		spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+		dmaengine_desc_get_callback(desc, &cb);
+
+		dma_run_dependencies(desc);
+
+		spin_lock_irqsave(&mchan->lock, irqflags);
+		list_move(&mdesc->node, &mchan->free);
+
+		if (llstat == DMA_COMPLETE) {
+			mchan->last_success = last_cookie;
+			result.result = DMA_TRANS_NOERROR;
+		} else
+			result.result = DMA_TRANS_ABORTED;
+
+		spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+		dmaengine_desc_callback_invoke(&cb, &result);
+	}
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+	struct hidma_desc *mdesc = data;
+	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+	struct dma_device *ddev = mchan->chan.device;
+	struct hidma_dev *dmadev = to_hidma_dev(ddev);
+	unsigned long irqflags;
+	bool queued = false;
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (mdesc->node.next) {
+		/* Delete from the active list, add to completed list */
+		list_move_tail(&mdesc->node, &mchan->completed);
+		queued = true;
+
+		/* calculate the next running descriptor */
+		mchan->running = list_first_entry(&mchan->active,
+						  struct hidma_desc, node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	hidma_process_completed(mchan);
+
+	if (queued) {
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+	struct hidma_chan *mchan;
+	struct dma_device *ddev;
+
+	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+	if (!mchan)
+		return -ENOMEM;
+
+	ddev = &dmadev->ddev;
+	mchan->dma_sig = dma_sig;
+	mchan->dmadev = dmadev;
+	mchan->chan.device = ddev;
+	dma_cookie_init(&mchan->chan);
+
+	INIT_LIST_HEAD(&mchan->free);
+	INIT_LIST_HEAD(&mchan->prepared);
+	INIT_LIST_HEAD(&mchan->active);
+	INIT_LIST_HEAD(&mchan->completed);
+	INIT_LIST_HEAD(&mchan->queued);
+
+	spin_lock_init(&mchan->lock);
+	list_add_tail(&mchan->chan.device_node, &ddev->channels);
+	dmadev->ddev.chancnt++;
+	return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+	struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	unsigned long flags;
+	struct hidma_desc *qdesc, *next;
+	int status;
+
+	spin_lock_irqsave(&mchan->lock, flags);
+	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
+		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
+		list_move_tail(&qdesc->node, &mchan->active);
+	}
+
+	if (!mchan->running) {
+		struct hidma_desc *desc = list_first_entry(&mchan->active,
+							   struct hidma_desc,
+							   node);
+		mchan->running = desc;
+	}
+	spin_unlock_irqrestore(&mchan->lock, flags);
+
+	/* PM will be released in hidma_callback function. */
+	status = pm_runtime_get(dmadev->ddev.dev);
+	if (status < 0)
+		tasklet_schedule(&dmadev->task);
+	else
+		hidma_ll_start(dmadev->lldev);
+}
+
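+/*
+ * A completed cookie is reported as successful when it lies outside the
+ * window (last_success, last_used] of cookies that are still pending or
+ * have failed; the two branches account for dma_cookie_t wraparound.
+ */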
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+		dma_cookie_t last_success, dma_cookie_t last_used)
+{
+	if (last_success <= last_used) {
+		if ((cookie <= last_success) || (cookie > last_used))
+			return true;
+	} else {
+		if ((cookie <= last_success) && (cookie > last_used))
+			return true;
+	}
+	return false;
+}
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+				       dma_cookie_t cookie,
+				       struct dma_tx_state *txstate)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(dmach, cookie, txstate);
+	if (ret == DMA_COMPLETE) {
+		bool is_success;
+
+		is_success = hidma_txn_is_success(cookie, mchan->last_success,
+						  dmach->cookie);
+		return is_success ? ret : DMA_ERROR;
+	}
+
+	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+		unsigned long flags;
+		dma_cookie_t runcookie;
+
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (mchan->running)
+			runcookie = mchan->running->desc.cookie;
+		else
+			runcookie = -EINVAL;
+
+		if (runcookie == cookie)
+			ret = DMA_PAUSED;
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	struct hidma_desc *mdesc;
+	unsigned long irqflags;
+	dma_cookie_t cookie;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	if (!hidma_ll_isenabled(dmadev->lldev)) {
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+		return -ENODEV;
+	}
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+
+	mdesc = container_of(txd, struct hidma_desc, desc);
+	spin_lock_irqsave(&mchan->lock, irqflags);
+
+	/* Move descriptor to queued */
+	list_move_tail(&mdesc->node, &mchan->queued);
+
+	/* Update cookie */
+	cookie = dma_cookie_assign(txd);
+
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *dmadev = mchan->dmadev;
+	struct hidma_desc *mdesc, *tmp;
+	unsigned long irqflags;
+	LIST_HEAD(descs);
+	unsigned int i;
+	int rc = 0;
+
+	if (mchan->allocated)
+		return 0;
+
+	/* Alloc descriptors for this channel */
+	for (i = 0; i < dmadev->nr_descriptors; i++) {
+		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+		if (!mdesc) {
+			rc = -ENOMEM;
+			break;
+		}
+		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+		mdesc->desc.tx_submit = hidma_tx_submit;
+
+		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+				      "DMA engine", hidma_callback, mdesc,
+				      &mdesc->tre_ch);
+		if (rc) {
+			dev_err(dmach->device->dev,
+				"channel alloc failed at %u\n", i);
+			kfree(mdesc);
+			break;
+		}
+		list_add_tail(&mdesc->node, &descs);
+	}
+
+	if (rc) {
+		/* release the descriptors allocated so far */
+		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+			kfree(mdesc);
+		}
+		return rc;
+	}
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_splice_tail_init(&descs, &mchan->free);
+	mchan->allocated = true;
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+	return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_desc *mdesc = NULL;
+	struct hidma_dev *mdma = mchan->dmadev;
+	unsigned long irqflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (!list_empty(&mchan->free)) {
+		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	if (!mdesc)
+		return NULL;
+
+	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+				     src, dest, len, flags,
+				     HIDMA_TRE_MEMCPY);
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_add_tail(&mdesc->node, &mchan->prepared);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return &mdesc->desc;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
+		size_t len, unsigned long flags)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_desc *mdesc = NULL;
+	struct hidma_dev *mdma = mchan->dmadev;
+	unsigned long irqflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (!list_empty(&mchan->free)) {
+		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	if (!mdesc)
+		return NULL;
+
+	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+				     value, dest, len, flags,
+				     HIDMA_TRE_MEMSET);
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_add_tail(&mdesc->node, &mchan->prepared);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan = to_hidma_chan(chan);
+	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+	struct hidma_desc *tmp, *mdesc;
+	unsigned long irqflags;
+	LIST_HEAD(list);
+	int rc;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	/* give completed requests a chance to finish */
+	hidma_process_completed(mchan);
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	mchan->last_success = 0;
+	list_splice_init(&mchan->active, &list);
+	list_splice_init(&mchan->prepared, &list);
+	list_splice_init(&mchan->completed, &list);
+	list_splice_init(&mchan->queued, &list);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	/* this suspends the existing transfer */
+	rc = hidma_ll_disable(dmadev->lldev);
+	if (rc) {
+		dev_err(dmadev->ddev.dev, "channel did not pause\n");
+		goto out;
+	}
+
+	/* return all user requests */
+	list_for_each_entry_safe(mdesc, tmp, &list, node) {
+		struct dma_async_tx_descriptor *txd = &mdesc->desc;
+
+		dma_descriptor_unmap(txd);
+		dmaengine_desc_get_callback_invoke(txd, NULL);
+		dma_run_dependencies(txd);
+
+		/* move myself to free_list */
+		list_move(&mdesc->node, &mchan->free);
+	}
+
+	rc = hidma_ll_enable(dmadev->lldev);
+out:
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan = to_hidma_chan(chan);
+	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+	int rc;
+
+	rc = hidma_terminate_channel(chan);
+	if (rc)
+		return rc;
+
+	/* reinitialize the hardware */
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	rc = hidma_ll_setup(dmadev->lldev);
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_dev *mdma = mchan->dmadev;
+	struct hidma_desc *mdesc, *tmp;
+	unsigned long irqflags;
+	LIST_HEAD(descs);
+
+	/* terminate running transactions and free descriptors */
+	hidma_terminate_channel(dmach);
+
+	spin_lock_irqsave(&mchan->lock, irqflags);
+
+	/* Move data */
+	list_splice_tail_init(&mchan->free, &descs);
+
+	/* Free descriptors */
+	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+		list_del(&mdesc->node);
+		kfree(mdesc);
+	}
+
+	mchan->allocated = 0;
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan;
+	struct hidma_dev *dmadev;
+
+	mchan = to_hidma_chan(chan);
+	dmadev = to_hidma_dev(mchan->chan.device);
+	if (!mchan->paused) {
+		pm_runtime_get_sync(dmadev->ddev.dev);
+		if (hidma_ll_disable(dmadev->lldev))
+			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+		mchan->paused = true;
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+	return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+	struct hidma_chan *mchan;
+	struct hidma_dev *dmadev;
+	int rc = 0;
+
+	mchan = to_hidma_chan(chan);
+	dmadev = to_hidma_dev(mchan->chan.device);
+	if (mchan->paused) {
+		pm_runtime_get_sync(dmadev->ddev.dev);
+		rc = hidma_ll_enable(dmadev->lldev);
+		if (!rc)
+			mchan->paused = false;
+		else
+			dev_err(dmadev->ddev.dev,
+				"failed to resume the channel");
+		pm_runtime_mark_last_busy(dmadev->ddev.dev);
+		pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	}
+	return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+
+	/*
+	 * All interrupts are request driven.
+	 * HW doesn't send an interrupt by itself.
+	 */
+	return hidma_ll_inthandler(chirq, lldev);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+	struct hidma_lldev **lldevp = arg;
+	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
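+	/* translate the MSI vector offset from the base vector into the per-event cause bit */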
+	return hidma_ll_inthandler_msi(chirq, *lldevp,
+				       1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
+static ssize_t hidma_show_values(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct hidma_dev *mdev = dev_get_drvdata(dev);
+
+	buf[0] = 0;
+
+	if (strcmp(attr->attr.name, "chid") == 0)
+		sprintf(buf, "%d\n", mdev->chidx);
+
+	return strlen(buf);
+}
+
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+	device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
+{
+	struct device_attribute *attrs;
+	char *name_copy;
+
+	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+			     GFP_KERNEL);
+	if (!attrs)
+		return NULL;
+
+	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
+	if (!name_copy)
+		return NULL;
+
+	attrs->attr.name = name_copy;
+	attrs->attr.mode = mode;
+	attrs->show = hidma_show_values;
+	sysfs_attr_init(&attrs->attr);
+
+	return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+	if (!dev->chid_attrs)
+		return -ENOMEM;
+
+	return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct device *dev = msi_desc_to_dev(desc);
+	struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+	if (!desc->platform.msi_index) {
+		writel(msg->address_lo, dmadev->dev_evca + 0x118);
+		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+		writel(msg->data, dmadev->dev_evca + 0x120);
+	}
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	struct device *dev = dmadev->ddev.dev;
+	struct msi_desc *desc;
+
+	/* free the MSI IRQs requested in hidma_request_msi() */
+	for_each_msi_entry(desc, dev)
+		devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+	platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+			     struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	int rc;
+	struct msi_desc *desc;
+	struct msi_desc *failed_desc = NULL;
+
+	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+					    hidma_write_msi_msg);
+	if (rc)
+		return rc;
+
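+	/* MSI index 0 is the base vector; the IRQ handler derives per-event vectors from it */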
+	for_each_msi_entry(desc, &pdev->dev) {
+		if (!desc->platform.msi_index)
+			dmadev->msi_virqbase = desc->irq;
+
+		rc = devm_request_irq(&pdev->dev, desc->irq,
+				       hidma_chirq_handler_msi,
+				       0, "qcom-hidma-msi",
+				       &dmadev->lldev);
+		if (rc) {
+			failed_desc = desc;
+			break;
+		}
+	}
+
+	if (rc) {
+		/* free allocated MSI interrupts above */
+		for_each_msi_entry(desc, &pdev->dev) {
+			if (desc == failed_desc)
+				break;
+			devm_free_irq(&pdev->dev, desc->irq,
+				      &dmadev->lldev);
+		}
+	} else {
+		/* all MSI vectors requested; switch the LL layer to MSI mode */
+		hidma_ll_setup_irq(dmadev->lldev, true);
+	}
+	if (rc)
+		dev_warn(&pdev->dev,
+			 "failed to request MSI irq, falling back to wired IRQ\n");
+	return rc;
+#else
+	return -EINVAL;
+#endif
+}
+
+static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
+{
+	enum hidma_cap cap;
+
+	cap = (enum hidma_cap) device_get_match_data(dev);
+	return cap ? ((cap & test_cap) > 0) : 0;
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev;
+	struct resource *trca_resource;
+	struct resource *evca_resource;
+	int chirq;
+	void __iomem *evca;
+	void __iomem *trca;
+	int rc;
+	bool msi;
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+	if (IS_ERR(trca)) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+	if (IS_ERR(evca)) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	/*
+	 * This driver only handles the channel IRQs.
+	 * Common IRQ is handled by the management driver.
+	 */
+	chirq = platform_get_irq(pdev, 0);
+	if (chirq < 0) {
+		rc = -ENODEV;
+		goto bailout;
+	}
+
+	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+	if (!dmadev) {
+		rc = -ENOMEM;
+		goto bailout;
+	}
+
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+	spin_lock_init(&dmadev->lock);
+	dmadev->ddev.dev = &pdev->dev;
+	pm_runtime_get_sync(dmadev->ddev.dev);
+
+	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
+	if (WARN_ON(!pdev->dev.dma_mask)) {
+		rc = -ENXIO;
+		goto dmafree;
+	}
+
+	dmadev->dev_evca = evca;
+	dmadev->evca_resource = evca_resource;
+	dmadev->dev_trca = trca;
+	dmadev->trca_resource = trca_resource;
+	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
+	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+	dmadev->ddev.device_tx_status = hidma_tx_status;
+	dmadev->ddev.device_issue_pending = hidma_issue_pending;
+	dmadev->ddev.device_pause = hidma_pause;
+	dmadev->ddev.device_resume = hidma_resume;
+	dmadev->ddev.device_terminate_all = hidma_terminate_all;
+	dmadev->ddev.copy_align = 8;
+
+	/*
+	 * Determine the MSI capability of the platform. Old HW doesn't
+	 * support MSI.
+	 */
+	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
+	device_property_read_u32(&pdev->dev, "desc-count",
+				 &dmadev->nr_descriptors);
+
+	if (nr_desc_prm) {
+		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
+			 nr_desc_prm);
+		dmadev->nr_descriptors = nr_desc_prm;
+	}
+
+	if (!dmadev->nr_descriptors)
+		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
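+	/* the channel index register sits at a different offset on HW with the identity feature */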
+	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
+		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
+	else
+		dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+	/* Set DMA mask to 64 bits. */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n");
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
+			goto dmafree;
+	}
+
+	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+				      dmadev->nr_descriptors, dmadev->dev_trca,
+				      dmadev->dev_evca, dmadev->chidx);
+	if (!dmadev->lldev) {
+		rc = -EPROBE_DEFER;
+		goto dmafree;
+	}
+
+	platform_set_drvdata(pdev, dmadev);
+	if (msi)
+		rc = hidma_request_msi(dmadev, pdev);
+
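+	/* fall back to the wired channel interrupt if MSI is unsupported or allocation failed */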
+	if (!msi || rc) {
+		hidma_ll_setup_irq(dmadev->lldev, false);
+		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+				      0, "qcom-hidma", dmadev->lldev);
+		if (rc)
+			goto uninit;
+	}
+
+	INIT_LIST_HEAD(&dmadev->ddev.channels);
+	rc = hidma_chan_init(dmadev, 0);
+	if (rc)
+		goto uninit;
+
+	rc = dma_async_device_register(&dmadev->ddev);
+	if (rc)
+		goto uninit;
+
+	dmadev->irq = chirq;
+	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+	hidma_debug_init(dmadev);
+	hidma_sysfs_init(dmadev);
+	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return 0;
+
+uninit:
+	if (msi)
+		hidma_free_msis(dmadev);
+
+	hidma_debug_uninit(dmadev);
+	hidma_ll_uninit(dmadev->lldev);
+dmafree:
+	if (dmadev)
+		hidma_free(dmadev);
+bailout:
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return rc;
+}
+
+static void hidma_shutdown(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	if (hidma_ll_disable(dmadev->lldev))
+		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	dma_async_device_unregister(&dmadev->ddev);
+	if (!dmadev->lldev->msi_support)
+		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	else
+		hidma_free_msis(dmadev);
+
+	tasklet_kill(&dmadev->task);
+	hidma_sysfs_uninit(dmadev);
+	hidma_debug_uninit(dmadev);
+	hidma_ll_uninit(dmadev->lldev);
+	hidma_free(dmadev);
+
+	dev_info(&pdev->dev, "HI-DMA engine removed\n");
+	pm_runtime_put_sync_suspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+	{"QCOM8061"},
+	{"QCOM8062", HIDMA_MSI_CAP},
+	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
+#endif
+
+static const struct of_device_id hidma_match[] = {
+	{.compatible = "qcom,hidma-1.0",},
+	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
+	{.compatible = "qcom,hidma-1.2",
+	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
+	{},
+};
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+	.probe = hidma_probe,
+	.remove = hidma_remove,
+	.shutdown = hidma_shutdown,
+	.driver = {
+		   .name = "hidma",
+		   .of_match_table = hidma_match,
+		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+	},
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 0000000..5f9966e
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,171 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define HIDMA_TRE_SIZE			32 /* each TRE is 32 bytes  */
+#define HIDMA_TRE_CFG_IDX		0
+#define HIDMA_TRE_LEN_IDX		1
+#define HIDMA_TRE_SRC_LOW_IDX		2
+#define HIDMA_TRE_SRC_HI_IDX		3
+#define HIDMA_TRE_DEST_LOW_IDX		4
+#define HIDMA_TRE_DEST_HI_IDX		5
+
+enum tre_type {
+	HIDMA_TRE_MEMCPY = 3,
+	HIDMA_TRE_MEMSET = 4,
+};
+
+struct hidma_tre {
+	atomic_t allocated;		/* if this channel is allocated	    */
+	bool queued;			/* flag whether this is pending     */
+	u16 status;			/* status			    */
+	u32 idx;			/* index of the tre		    */
+	u32 dma_sig;			/* signature of the tre		    */
+	const char *dev_name;		/* name of the device		    */
+	void (*callback)(void *data);	/* requester callback		    */
+	void *data;			/* Data associated with this channel*/
+	struct hidma_lldev *lldev;	/* low level DMA device pointer	    */
+	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy  */
+	u32 tre_index;			/* the offset where this was written*/
+	u32 int_flags;			/* interrupt flags		    */
+	u8 err_info;			/* error record in this transfer    */
+	u8 err_code;			/* completion code		    */
+};
+
+struct hidma_lldev {
+	bool msi_support;		/* flag indicating MSI support    */
+	bool initialized;		/* initialized flag               */
+	u8 trch_state;			/* trch_state of the device	  */
+	u8 evch_state;			/* evch_state of the device	  */
+	u8 chidx;			/* channel index in the core	  */
+	u32 nr_tres;			/* max number of configs          */
+	spinlock_t lock;		/* reentrancy                     */
+	struct hidma_tre *trepool;	/* trepool of user configs */
+	struct device *dev;		/* device			  */
+	void __iomem *trca;		/* Transfer Channel address       */
+	void __iomem *evca;		/* Event Channel address          */
+	struct hidma_tre
+		**pending_tre_list;	/* Pointers to pending TREs	  */
+	atomic_t pending_tre_count;	/* Number of TREs pending	  */
+
+	void *tre_ring;			/* TRE ring			  */
+	dma_addr_t tre_dma;		/* TRE ring to be shared with HW  */
+	u32 tre_ring_size;		/* Byte size of the ring	  */
+	u32 tre_processed_off;		/* last processed TRE		  */
+
+	void *evre_ring;		/* EVRE ring			   */
+	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW  */
+	u32 evre_ring_size;		/* Byte size of the ring	   */
+	u32 evre_processed_off;		/* last processed EVRE		   */
+
+	u32 tre_write_offset;           /* TRE write location              */
+	struct tasklet_struct task;	/* task delivering notifications   */
+	DECLARE_KFIFO_PTR(handoff_fifo,
+		struct hidma_tre *);    /* pending TREs FIFO               */
+};
+
+struct hidma_desc {
+	struct dma_async_tx_descriptor	desc;
+	/* linked list node for this channel */
+	struct list_head		node;
+	u32				tre_ch;
+};
+
+struct hidma_chan {
+	bool				paused;
+	bool				allocated;
+	char				dbg_name[16];
+	u32				dma_sig;
+	dma_cookie_t			last_success;
+
+	/*
+	 * active descriptor on this channel
+	 * It is used by the DMA complete notification to
+	 * locate the descriptor that initiated the transfer.
+	 */
+	struct dentry			*debugfs;
+	struct dentry			*stats;
+	struct hidma_dev		*dmadev;
+	struct hidma_desc		*running;
+
+	struct dma_chan			chan;
+	struct list_head		free;
+	struct list_head		prepared;
+	struct list_head		queued;
+	struct list_head		active;
+	struct list_head		completed;
+
+	/* Lock for this structure */
+	spinlock_t			lock;
+};
+
+struct hidma_dev {
+	int				irq;
+	int				chidx;
+	u32				nr_descriptors;
+	int				msi_virqbase;
+
+	struct hidma_lldev		*lldev;
+	void				__iomem *dev_trca;
+	struct resource			*trca_resource;
+	void				__iomem *dev_evca;
+	struct resource			*evca_resource;
+
+	/* used to protect the pending channel list */
+	spinlock_t			lock;
+	struct dma_device		ddev;
+
+	struct dentry			*debugfs;
+	struct dentry			*stats;
+
+	/* sysfs entry for the channel id */
+	struct device_attribute		*chid_attrs;
+
+	/* Task delivering issue_pending */
+	struct tasklet_struct		task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+			const char *dev_name,
+			void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_disable(struct hidma_lldev *lldev);
+int hidma_ll_enable(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+			void __iomem *trca, void __iomem *evca,
+			u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+				u8 err_code);
+int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_uninit(struct hidma_dev *dmadev);
+#endif
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
new file mode 100644
index 0000000..3bdcb80
--- /dev/null
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -0,0 +1,217 @@
+/*
+ * Qualcomm Technologies HIDMA debug file
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+
+#include "hidma.h"
+
+static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch)
+{
+	struct hidma_lldev *lldev = llhndl;
+	struct hidma_tre *tre;
+	u32 length;
+	dma_addr_t src_start;
+	dma_addr_t dest_start;
+	u32 *tre_local;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch);
+		return;
+	}
+	tre = &lldev->trepool[tre_ch];
+	seq_printf(s, "------Channel %d -----\n", tre_ch);
+	seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated));
+	seq_printf(s, "queued = 0x%x\n", tre->queued);
+	seq_printf(s, "err_info = 0x%x\n", tre->err_info);
+	seq_printf(s, "err_code = 0x%x\n", tre->err_code);
+	seq_printf(s, "status = 0x%x\n", tre->status);
+	seq_printf(s, "idx = 0x%x\n", tre->idx);
+	seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig);
+	seq_printf(s, "dev_name=%s\n", tre->dev_name);
+	seq_printf(s, "callback=%p\n", tre->callback);
+	seq_printf(s, "data=%p\n", tre->data);
+	seq_printf(s, "tre_index = 0x%x\n", tre->tre_index);
+
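+	/* decode the 64-bit source/destination addresses and length from the TRE words */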
+	tre_local = &tre->tre_local[0];
+	src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX];
+	src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start;
+	dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX];
+	dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32);
+	length = tre_local[HIDMA_TRE_LEN_IDX];
+
+	seq_printf(s, "src=%pap\n", &src_start);
+	seq_printf(s, "dest=%pap\n", &dest_start);
+	seq_printf(s, "length = 0x%x\n", length);
+}
+
+static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
+{
+	struct hidma_lldev *lldev = llhndl;
+
+	seq_puts(s, "------Device -----\n");
+	seq_printf(s, "lldev init = 0x%x\n", lldev->initialized);
+	seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state);
+	seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state);
+	seq_printf(s, "chidx = 0x%x\n", lldev->chidx);
+	seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres);
+	seq_printf(s, "trca=%p\n", lldev->trca);
+	seq_printf(s, "tre_ring=%p\n", lldev->tre_ring);
+	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
+	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
+	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
+	seq_printf(s, "pending_tre_count=%d\n",
+			atomic_read(&lldev->pending_tre_count));
+	seq_printf(s, "evca=%p\n", lldev->evca);
+	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
+	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
+	seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size);
+	seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off);
+	seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset);
+}
+
+/*
+ * hidma_chan_stats: display HIDMA channel statistics
+ *
+ * Display the statistics for the current HIDMA virtual channel device.
+ */
+static int hidma_chan_stats(struct seq_file *s, void *unused)
+{
+	struct hidma_chan *mchan = s->private;
+	struct hidma_desc *mdesc;
+	struct hidma_dev *dmadev = mchan->dmadev;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	seq_printf(s, "paused=%u\n", mchan->paused);
+	seq_printf(s, "dma_sig=%u\n", mchan->dma_sig);
+	seq_puts(s, "prepared\n");
+	list_for_each_entry(mdesc, &mchan->prepared, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	seq_puts(s, "active\n");
+	list_for_each_entry(mdesc, &mchan->active, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	seq_puts(s, "completed\n");
+	list_for_each_entry(mdesc, &mchan->completed, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	hidma_ll_devstats(s, mchan->dmadev->lldev);
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return 0;
+}
+
+/*
+ * hidma_dma_info: display HIDMA device info
+ *
+ * Display the info for the current HIDMA device.
+ */
+static int hidma_dma_info(struct seq_file *s, void *unused)
+{
+	struct hidma_dev *dmadev = s->private;
+	resource_size_t sz;
+
+	seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors);
+	seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca);
+	seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start);
+	sz = resource_size(dmadev->trca_resource);
+	seq_printf(s, "dev_trca_size=%pa\n", &sz);
+	seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca);
+	seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start);
+	sz = resource_size(dmadev->evca_resource);
+	seq_printf(s, "dev_evca_size=%pa\n", &sz);
+	return 0;
+}
+
+static int hidma_chan_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hidma_chan_stats, inode->i_private);
+}
+
+static int hidma_dma_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hidma_dma_info, inode->i_private);
+}
+
+static const struct file_operations hidma_chan_fops = {
+	.open = hidma_chan_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations hidma_dma_fops = {
+	.open = hidma_dma_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void hidma_debug_uninit(struct hidma_dev *dmadev)
+{
+	debugfs_remove_recursive(dmadev->debugfs);
+}
+
+int hidma_debug_init(struct hidma_dev *dmadev)
+{
+	int rc = 0;
+	int chidx = 0;
+	struct list_head *position = NULL;
+
+	dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
+	if (!dmadev->debugfs) {
+		rc = -ENODEV;
+		return rc;
+	}
+
+	/* walk through the virtual channel list */
+	list_for_each(position, &dmadev->ddev.channels) {
+		struct hidma_chan *chan;
+
+		chan = list_entry(position, struct hidma_chan,
+				  chan.device_node);
+		sprintf(chan->dbg_name, "chan%d", chidx);
+		chan->debugfs = debugfs_create_dir(chan->dbg_name,
+						   dmadev->debugfs);
+		if (!chan->debugfs) {
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+		chan->stats = debugfs_create_file("stats", S_IRUGO,
+						  chan->debugfs, chan,
+						  &hidma_chan_fops);
+		if (!chan->stats) {
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+		chidx++;
+	}
+
+	dmadev->stats = debugfs_create_file("stats", S_IRUGO,
+					    dmadev->debugfs, dmadev,
+					    &hidma_dma_fops);
+	if (!dmadev->stats) {
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+	return 0;
+cleanup:
+	hidma_debug_uninit(dmadev);
+	return rc;
+}
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
new file mode 100644
index 0000000..7c6e2ff
--- /dev/null
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -0,0 +1,865 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine low level code
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/bitops.h>
+
+#include "hidma.h"
+
+#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */
+
+#define HIDMA_TRCA_CTRLSTS_REG			0x000
+#define HIDMA_TRCA_RING_LOW_REG		0x008
+#define HIDMA_TRCA_RING_HIGH_REG		0x00C
+#define HIDMA_TRCA_RING_LEN_REG		0x010
+#define HIDMA_TRCA_DOORBELL_REG		0x400
+
+#define HIDMA_EVCA_CTRLSTS_REG			0x000
+#define HIDMA_EVCA_INTCTRL_REG			0x004
+#define HIDMA_EVCA_RING_LOW_REG		0x008
+#define HIDMA_EVCA_RING_HIGH_REG		0x00C
+#define HIDMA_EVCA_RING_LEN_REG		0x010
+#define HIDMA_EVCA_WRITE_PTR_REG		0x020
+#define HIDMA_EVCA_DOORBELL_REG		0x400
+
+#define HIDMA_EVCA_IRQ_STAT_REG		0x100
+#define HIDMA_EVCA_IRQ_CLR_REG			0x108
+#define HIDMA_EVCA_IRQ_EN_REG			0x110
+
+#define HIDMA_EVRE_CFG_IDX			0
+
+#define HIDMA_EVRE_ERRINFO_BIT_POS		24
+#define HIDMA_EVRE_CODE_BIT_POS		28
+
+#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
+#define HIDMA_EVRE_CODE_MASK			GENMASK(3, 0)
+
+#define HIDMA_CH_CONTROL_MASK			GENMASK(7, 0)
+#define HIDMA_CH_STATE_MASK			GENMASK(7, 0)
+#define HIDMA_CH_STATE_BIT_POS			0x8
+
+#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS	0
+#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS	1
+#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
+#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
+#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
+#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14
+
+#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
+
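+/* Advance a ring offset by 'size' bytes, wrapping around at 'ring_size' */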
+#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
+do {								\
+	iter += size;						\
+	if (iter >= ring_size)					\
+		iter -= ring_size;				\
+} while (0)
+
+#define HIDMA_CH_STATE(val)	\
+	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)
+
+#define HIDMA_ERR_INT_MASK				\
+	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   |	\
+	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |	\
+	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	    |	\
+	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    |	\
+	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
+
+enum ch_command {
+	HIDMA_CH_DISABLE = 0,
+	HIDMA_CH_ENABLE = 1,
+	HIDMA_CH_SUSPEND = 2,
+	HIDMA_CH_RESET = 9,
+};
+
+enum ch_state {
+	HIDMA_CH_DISABLED = 0,
+	HIDMA_CH_ENABLED = 1,
+	HIDMA_CH_RUNNING = 2,
+	HIDMA_CH_SUSPENDED = 3,
+	HIDMA_CH_STOPPED = 4,
+};
+
+enum err_code {
+	HIDMA_EVRE_STATUS_COMPLETE = 1,
+	HIDMA_EVRE_STATUS_ERROR = 4,
+};
+
+static int hidma_is_chan_enabled(int state)
+{
+	switch (state) {
+	case HIDMA_CH_ENABLED:
+	case HIDMA_CH_RUNNING:
+		return true;
+	default:
+		return false;
+	}
+}
+
+void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	struct hidma_tre *tre;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
+		return;
+	}
+
+	tre = &lldev->trepool[tre_ch];
+	if (atomic_read(&tre->allocated) != true) {
+		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
+		return;
+	}
+
+	atomic_set(&tre->allocated, 0);
+}
+
+int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
+		     void (*callback)(void *data), void *data, u32 *tre_ch)
+{
+	unsigned int i;
+	struct hidma_tre *tre;
+	u32 *tre_local;
+
+	if (!tre_ch || !lldev)
+		return -EINVAL;
+
+	/* need to have at least one empty spot in the queue */
+	for (i = 0; i < lldev->nr_tres - 1; i++) {
+		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
+			break;
+	}
+
+	if (i == (lldev->nr_tres - 1))
+		return -ENOMEM;
+
+	tre = &lldev->trepool[i];
+	tre->dma_sig = sig;
+	tre->dev_name = dev_name;
+	tre->callback = callback;
+	tre->data = data;
+	tre->idx = i;
+	tre->status = 0;
+	tre->queued = 0;
+	tre->err_code = 0;
+	tre->err_info = 0;
+	tre->lldev = lldev;
+	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
+	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
+	*tre_ch = i;
+	if (callback)
+		callback(data);
+	return 0;
+}
+
+/*
+ * Multiple TREs may be queued and waiting in the pending queue.
+ */
+static void hidma_ll_tre_complete(unsigned long arg)
+{
+	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+	struct hidma_tre *tre;
+
+	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
+		/* call the user if it has been read by the hardware */
+		if (tre->callback)
+			tre->callback(tre->data);
+	}
+}
+
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+				u8 err_code)
+{
+	struct hidma_tre *tre;
+	unsigned long flags;
+	u32 tre_iterator;
+
+	spin_lock_irqsave(&lldev->lock, flags);
+
+	tre_iterator = lldev->tre_processed_off;
+	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
+	if (!tre) {
+		spin_unlock_irqrestore(&lldev->lock, flags);
+		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
+			 tre_iterator / HIDMA_TRE_SIZE);
+		return -EINVAL;
+	}
+	lldev->pending_tre_list[tre->tre_index] = NULL;
+
+	/*
+	 * Keep track of pending TREs that SW is expecting to receive
+	 * from HW. We got one now. Decrement our counter.
+	 */
+	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
+		dev_warn(lldev->dev, "tre count mismatch on completion");
+		atomic_set(&lldev->pending_tre_count, 0);
+	}
+
+	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+				 lldev->tre_ring_size);
+	lldev->tre_processed_off = tre_iterator;
+	spin_unlock_irqrestore(&lldev->lock, flags);
+
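+	/* record the completion status and hand the TRE to the tasklet for the user callback */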
+	tre->err_info = err_info;
+	tre->err_code = err_code;
+	tre->queued = 0;
+
+	kfifo_put(&lldev->handoff_fifo, tre);
+	tasklet_schedule(&lldev->task);
+
+	return 0;
+}
+
+/*
+ * Called to handle the interrupt for the channel.
+ * Returns the number of EVREs consumed on this run (positive if any TRE or
+ * EVRE was processed), or 0 if there was nothing pending to consume.
+ */
+static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
+{
+	u32 evre_ring_size = lldev->evre_ring_size;
+	u32 err_info, err_code, evre_write_off;
+	u32 evre_iterator;
+	u32 num_completed = 0;
+
+	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+	evre_iterator = lldev->evre_processed_off;
+
+	if ((evre_write_off > evre_ring_size) ||
+	    (evre_write_off % HIDMA_EVRE_SIZE)) {
+		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
+		return 0;
+	}
+
+	/*
+	 * By the time control reaches here, the number of EVREs and TREs
+	 * may not match.  Only consume the EVREs that hardware has reported.
+	 */
+	while ((evre_iterator != evre_write_off)) {
+		u32 *current_evre = lldev->evre_ring + evre_iterator;
+		u32 cfg;
+
+		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
+		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
+		err_info &= HIDMA_EVRE_ERRINFO_MASK;
+		err_code =
+		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
+
+		if (hidma_post_completed(lldev, err_info, err_code))
+			break;
+
+		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
+					 evre_ring_size);
+
+		/*
+		 * Read the new event descriptor written by the HW.
+		 * As we are processing the delivered events, other events
+		 * get queued to the SW for processing.
+		 */
+		evre_write_off =
+		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+		num_completed++;
+
+		/*
+		 * An error interrupt might have arrived while we are processing
+		 * the completed interrupt.
+		 */
+		if (!hidma_ll_isenabled(lldev))
+			break;
+	}
+
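+	/* ring the event channel doorbell so HW knows how far SW has consumed */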
+	if (num_completed) {
+		u32 evre_read_off = (lldev->evre_processed_off +
+				     HIDMA_EVRE_SIZE * num_completed);
+		evre_read_off = evre_read_off % evre_ring_size;
+		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
+
+		/* record the last processed tre offset */
+		lldev->evre_processed_off = evre_read_off;
+	}
+
+	return num_completed;
+}
+
+void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
+			       u8 err_code)
+{
+	while (atomic_read(&lldev->pending_tre_count)) {
+		if (hidma_post_completed(lldev, err_info, err_code))
+			break;
+	}
+}
+
+static int hidma_ll_reset(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
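+	/* reset the transfer channel first, then the event channel */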
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_RESET << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Allow the DMA logic to quiesce after reset:
+	 * poll the channel state every 1ms, up to a 10ms timeout.
+	 */
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "transfer channel did not reset\n");
+		return ret;
+	}
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_RESET << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Allow the DMA logic to quiesce after reset:
+	 * poll the channel state every 1ms, up to a 10ms timeout.
+	 */
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	lldev->trch_state = HIDMA_CH_DISABLED;
+	lldev->evch_state = HIDMA_CH_DISABLED;
+	return 0;
+}
+
+/*
+ * The interrupt handler for HIDMA will try to consume as many pending
+ * EVRE from the event queue as possible. Each EVRE has an associated
+ * TRE that holds the user interface parameters. EVRE reports the
+ * result of the transaction. Hardware guarantees ordering between EVREs
+ * and TREs. We use last processed offset to figure out which TRE is
+ * associated with which EVRE. If two TREs are consumed by HW, the EVREs
+ * are in order in the event ring.
+ *
+ * This handler will do a one pass for consuming EVREs. Other EVREs may
+ * be delivered while we are working. It will try to consume incoming
+ * EVREs one more time and return.
+ *
+ * For unprocessed EVREs, hardware will trigger another interrupt until
+ * all the interrupt bits are cleared.
+ *
+ * Hardware guarantees that by the time interrupt is observed, all data
+ * transactions in flight are delivered to their respective places and
+ * are visible to the CPU.
+ *
+ * On demand paging for IOMMU is only supported for PCIe via PRI
+ * (Page Request Interface), not for HIDMA.  All other hardware instances,
+ * including HIDMA, work on pinned DMA addresses.
+ *
+ * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
+ * IOMMU latency will be built into the data movement time. By the time
+ * interrupt happens, IOMMU lookups + data movement has already taken place.
+ *
+ * While the first register read in a typical PCI endpoint ISR traditionally
+ * flushes all outstanding requests to their destination, that concept does
+ * not apply to this HW.
+ */
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
+{
+	unsigned long irqflags;
+
+	if (cause & HIDMA_ERR_INT_MASK) {
+		dev_err(lldev->dev, "error 0x%x, disabling...\n",
+				cause);
+
+		/* Clear out pending interrupts */
+		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+		/* No further submissions. */
+		hidma_ll_disable(lldev);
+
+		/* Driver completes the txn and intimates the client.*/
+		hidma_cleanup_pending_tre(lldev, 0xFF,
+					  HIDMA_EVRE_STATUS_ERROR);
+
+		return;
+	}
+
+	spin_lock_irqsave(&lldev->lock, irqflags);
+	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	spin_unlock_irqrestore(&lldev->lock, irqflags);
+
+	/*
+	 * Fine tuned for this HW...
+	 *
+	 * This ISR has been designed for this particular hardware. Relaxed
+	 * read and write accessors are used for performance reasons due to
+	 * interrupt delivery guarantees. Do not copy this code blindly and
+	 * expect that to work.
+	 *
+	 * Try to consume as many EVREs as possible.
+	 */
+	hidma_handle_tre_completion(lldev);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+	u32 status;
+	u32 enable;
+	u32 cause;
+
+	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	cause = status & enable;
+
+	while (cause) {
+		hidma_ll_int_handler_internal(lldev, cause);
+
+		/*
+		 * Another interrupt might have arrived while we are
+		 * processing this one. Read the new cause.
+		 */
+		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+		cause = status & enable;
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+	struct hidma_lldev *lldev = arg;
+
+	hidma_ll_int_handler_internal(lldev, cause);
+	return IRQ_HANDLED;
+}
+
+int hidma_ll_enable(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_ENABLE << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "event channel did not get enabled\n");
+		return ret;
+	}
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_ENABLE << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "transfer channel did not get enabled\n");
+		return ret;
+	}
+
+	lldev->trch_state = HIDMA_CH_ENABLED;
+	lldev->evch_state = HIDMA_CH_ENABLED;
+
+	/* enable irqs */
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	return 0;
+}
+
+void hidma_ll_start(struct hidma_lldev *lldev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&lldev->lock, irqflags);
+	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
+	spin_unlock_irqrestore(&lldev->lock, irqflags);
+}
+
+bool hidma_ll_isenabled(struct hidma_lldev *lldev)
+{
+	u32 val;
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	lldev->trch_state = HIDMA_CH_STATE(val);
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	lldev->evch_state = HIDMA_CH_STATE(val);
+
+	/* both channels have to be enabled before calling this function */
+	if (hidma_is_chan_enabled(lldev->trch_state) &&
+	    hidma_is_chan_enabled(lldev->evch_state))
+		return true;
+
+	return false;
+}
+
+void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	struct hidma_tre *tre;
+	unsigned long flags;
+
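+	/* the TRE is only handed to HW later, when hidma_ll_start() rings the doorbell */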
+	tre = &lldev->trepool[tre_ch];
+
+	/* copy the TRE into its location in the TRE ring */
+	spin_lock_irqsave(&lldev->lock, flags);
+	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
+	lldev->pending_tre_list[tre->tre_index] = tre;
+	memcpy(lldev->tre_ring + lldev->tre_write_offset,
+			&tre->tre_local[0], HIDMA_TRE_SIZE);
+	tre->err_code = 0;
+	tre->err_info = 0;
+	tre->queued = 1;
+	atomic_inc(&lldev->pending_tre_count);
+	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
+					% lldev->tre_ring_size;
+	spin_unlock_irqrestore(&lldev->lock, flags);
+}
+
+/*
+ * Note that even though we stop this channel, a transaction already in
+ * flight will still complete and invoke its callback.  Suspending only
+ * prevents new requests from being issued.
+ */
+int hidma_ll_disable(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
+	/* The channel needs to be in working state */
+	if (!hidma_ll_isenabled(lldev))
+		return 0;
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_SUSPEND << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the transfer channel to report the suspended state:
+	 * poll every 1ms, up to a 10ms timeout.
+	 */
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_SUSPEND << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the event channel to report the suspended state:
+	 * poll every 1ms, up to a 10ms timeout.
+	 */
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	lldev->trch_state = HIDMA_CH_SUSPENDED;
+	lldev->evch_state = HIDMA_CH_SUSPENDED;
+
+	/* disable interrupts */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	return 0;
+}
+
+void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
+				  dma_addr_t src, dma_addr_t dest, u32 len,
+				  u32 flags, u32 txntype)
+{
+	struct hidma_tre *tre;
+	u32 *tre_local;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
+			tre_ch);
+		return;
+	}
+
+	tre = &lldev->trepool[tre_ch];
+	if (atomic_read(&tre->allocated) != true) {
+		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
+			tre_ch);
+		return;
+	}
+
+	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
+	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
+	tre_local[HIDMA_TRE_LEN_IDX] = len;
+	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
+	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
+	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
+	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
+	tre->int_flags = flags;
+}
+
+/*
+ * Called during initialization and after an error condition
+ * to restore hardware state.
+ */
+int hidma_ll_setup(struct hidma_lldev *lldev)
+{
+	int rc;
+	u64 addr;
+	u32 val;
+	u32 nr_tres = lldev->nr_tres;
+
+	atomic_set(&lldev->pending_tre_count, 0);
+	lldev->tre_processed_off = 0;
+	lldev->evre_processed_off = 0;
+	lldev->tre_write_offset = 0;
+
+	/* disable interrupts */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* clear all pending interrupts */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+	rc = hidma_ll_reset(lldev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Clear all pending interrupts again.
+	 * Otherwise, we observe reset complete interrupts.
+	 */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
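+	/* program the TRE and EVRE ring base addresses and lengths into the channel registers */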
+	addr = lldev->tre_dma;
+	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
+	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
+	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);
+
+	addr = lldev->evre_dma;
+	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
+	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
+	writel(HIDMA_EVRE_SIZE * nr_tres,
+			lldev->evca + HIDMA_EVCA_RING_LEN_REG);
+
+	/* configure interrupts */
+	hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+	rc = hidma_ll_enable(lldev);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+	u32 val;
+
+	lldev->msi_support = msi;
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* select wired IRQ delivery unless MSI is in use */
+	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+	val &= ~0xF;
+	if (!lldev->msi_support)
+		val = val | 0x1;
+	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+
+	/* clear all pending interrupts and enable them */
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+}
+
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
+				  void __iomem *trca, void __iomem *evca,
+				  u8 chidx)
+{
+	u32 required_bytes;
+	struct hidma_lldev *lldev;
+	int rc;
+	size_t sz;
+
+	if (!trca || !evca || !dev || !nr_tres)
+		return NULL;
+
+	/* need at least four TREs */
+	if (nr_tres < 4)
+		return NULL;
+
+	/* allocate one extra TRE so at least one slot always stays free */
+	nr_tres += 1;
+
+	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
+	if (!lldev)
+		return NULL;
+
+	lldev->evca = evca;
+	lldev->trca = trca;
+	lldev->dev = dev;
+	sz = sizeof(struct hidma_tre);
+	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
+	if (!lldev->trepool)
+		return NULL;
+
+	required_bytes = sizeof(lldev->pending_tre_list[0]);
+	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
+					       GFP_KERNEL);
+	if (!lldev->pending_tre_list)
+		return NULL;
+
+	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
+	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
+					      GFP_KERNEL);
+	if (!lldev->tre_ring)
+		return NULL;
+
+	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
+	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
+	lldev->nr_tres = nr_tres;
+
+	/* the TRE ring has to be TRE_SIZE aligned */
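+	/* e.g. a base ending in 0x14 is shifted up by 32 - 20 = 12 bytes to the next 32-byte boundary */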
+	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
+		u8 tre_ring_shift;
+
+		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
+		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
+		lldev->tre_dma += tre_ring_shift;
+		lldev->tre_ring += tre_ring_shift;
+	}
+
+	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
+	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
+					       GFP_KERNEL);
+	if (!lldev->evre_ring)
+		return NULL;
+
+	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
+	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
+
+	/* the EVRE ring has to be EVRE_SIZE aligned */
+	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
+		u8 evre_ring_shift;
+
+		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
+		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
+		lldev->evre_dma += evre_ring_shift;
+		lldev->evre_ring += evre_ring_shift;
+	}
+	lldev->nr_tres = nr_tres;
+	lldev->chidx = chidx;
+
+	sz = nr_tres * sizeof(struct hidma_tre *);
+	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
+	if (rc)
+		return NULL;
+
+	rc = hidma_ll_setup(lldev);
+	if (rc)
+		return NULL;
+
+	spin_lock_init(&lldev->lock);
+	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+	lldev->initialized = 1;
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	return lldev;
+}
+
+int hidma_ll_uninit(struct hidma_lldev *lldev)
+{
+	u32 required_bytes;
+	int rc = 0;
+	u32 val;
+
+	if (!lldev)
+		return -ENODEV;
+
+	if (!lldev->initialized)
+		return 0;
+
+	lldev->initialized = 0;
+
+	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
+	tasklet_kill(&lldev->task);
+	memset(lldev->trepool, 0, required_bytes);
+	lldev->trepool = NULL;
+	atomic_set(&lldev->pending_tre_count, 0);
+	lldev->tre_write_offset = 0;
+
+	rc = hidma_ll_reset(lldev);
+
+	/*
+	 * Clear all pending interrupts again.
+	 * Otherwise, we observe reset complete interrupts.
+	 */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	return rc;
+}
+
+enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	enum dma_status ret = DMA_ERROR;
+	struct hidma_tre *tre;
+	unsigned long flags;
+	u8 err_code;
+
+	spin_lock_irqsave(&lldev->lock, flags);
+
+	tre = &lldev->trepool[tre_ch];
+	err_code = tre->err_code;
+
+	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
+		ret = DMA_COMPLETE;
+	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
+		ret = DMA_ERROR;
+	else
+		ret = DMA_IN_PROGRESS;
+	spin_unlock_irqrestore(&lldev->lock, flags);
+
+	return ret;
+}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 0000000..d64edeb
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,431 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine Management interface
+ *
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include "hidma_mgmt.h"
+
+#define HIDMA_QOS_N_OFFSET		0x700
+#define HIDMA_CFG_OFFSET		0x400
+#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
+#define HIDMA_MAX_XACTIONS_OFFSET	0x420
+#define HIDMA_HW_VERSION_OFFSET	0x424
+#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418
+
+#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
+#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
+#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
+#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
+#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)
+
+#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
+#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
+#define HIDMA_WRR_BIT_POS		8
+#define HIDMA_PRIORITY_BIT_POS		15
+
+#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
+#define HIDMA_MAX_CHANNEL_WEIGHT	15
+
+static unsigned int max_write_request;
+module_param(max_write_request, uint, 0644);
+MODULE_PARM_DESC(max_write_request,
+		"maximum write burst (default: ACPI/DT value)");
+
+static unsigned int max_read_request;
+module_param(max_read_request, uint, 0644);
+MODULE_PARM_DESC(max_read_request,
+		"maximum read burst (default: ACPI/DT value)");
+
+static unsigned int max_wr_xactions;
+module_param(max_wr_xactions, uint, 0644);
+MODULE_PARM_DESC(max_wr_xactions,
+	"maximum number of write transactions (default: ACPI/DT value)");
+
+static unsigned int max_rd_xactions;
+module_param(max_rd_xactions, uint, 0644);
+MODULE_PARM_DESC(max_rd_xactions,
+	"maximum number of read transactions (default: ACPI/DT value)");
+
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
+{
+	unsigned int i;
+	u32 val;
+
+	if (!is_power_of_2(mgmtdev->max_write_request) ||
+	    (mgmtdev->max_write_request < 128) ||
+	    (mgmtdev->max_write_request > 1024)) {
+		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
+			mgmtdev->max_write_request);
+		return -EINVAL;
+	}
+
+	if (!is_power_of_2(mgmtdev->max_read_request) ||
+	    (mgmtdev->max_read_request < 128) ||
+	    (mgmtdev->max_read_request > 1024)) {
+		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
+			mgmtdev->max_read_request);
+		return -EINVAL;
+	}
+
+	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
+		dev_err(&mgmtdev->pdev->dev,
+			"max_wr_xactions cannot be bigger than %ld\n",
+			HIDMA_MAX_WR_XACTIONS_MASK);
+		return -EINVAL;
+	}
+
+	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
+		dev_err(&mgmtdev->pdev->dev,
+			"max_rd_xactions cannot be bigger than %ld\n",
+			HIDMA_MAX_RD_XACTIONS_MASK);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mgmtdev->dma_channels; i++) {
+		if (mgmtdev->priority[i] > 1) {
+			dev_err(&mgmtdev->pdev->dev,
+				"priority can be 0 or 1\n");
+			return -EINVAL;
+		}
+
+		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
+			dev_err(&mgmtdev->pdev->dev,
+				"max value of weight can be %d.\n",
+				HIDMA_MAX_CHANNEL_WEIGHT);
+			return -EINVAL;
+		}
+
+		/* weight needs to be at least one */
+		if (mgmtdev->weight[i] == 0)
+			mgmtdev->weight[i] = 1;
+	}
+
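+	/* wake the device and program bus request lengths: write burst in the upper half of the register, read burst in the lower half */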
+	pm_runtime_get_sync(&mgmtdev->pdev->dev);
+	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
+	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
+	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
+	val |= mgmtdev->max_read_request;
+	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+
+	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
+	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
+	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
+	val |= mgmtdev->max_rd_xactions;
+	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+
+	mgmtdev->hw_version =
+	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
+	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
+	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
+
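+	/* program each channel's weight and priority into its QoS register */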
+	for (i = 0; i < mgmtdev->dma_channels; i++) {
+		u32 weight = mgmtdev->weight[i];
+		u32 priority = mgmtdev->priority[i];
+
+		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
+		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
+		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
+		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
+		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+	}
+
+	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
+	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
+	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+
+	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
+	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
+
+static int hidma_mgmt_probe(struct platform_device *pdev)
+{
+	struct hidma_mgmt_dev *mgmtdev;
+	struct resource *res;
+	void __iomem *virtaddr;
+	int irq;
+	int rc;
+	u32 val;
+
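+	/*
+	 * Enable runtime PM and hold a reference for the duration of probe;
+	 * it is dropped with autosuspend once setup completes.
+	 */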
+	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	virtaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(virtaddr)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "irq resources not found\n");
+		rc = irq;
+		goto out;
+	}
+
+	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
+	if (!mgmtdev) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	mgmtdev->pdev = pdev;
+	mgmtdev->addrsize = resource_size(res);
+	mgmtdev->virtaddr = virtaddr;
+
+	rc = device_property_read_u32(&pdev->dev, "dma-channels",
+				      &mgmtdev->dma_channels);
+	if (rc) {
+		dev_err(&pdev->dev, "number of channels missing\n");
+		goto out;
+	}
+
+	rc = device_property_read_u32(&pdev->dev,
+				      "channel-reset-timeout-cycles",
+				      &mgmtdev->chreset_timeout_cycles);
+	if (rc) {
+		dev_err(&pdev->dev, "channel reset timeout missing\n");
+		goto out;
+	}
+
+	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
+				      &mgmtdev->max_write_request);
+	if (rc) {
+		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
+		goto out;
+	}
+
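+	/*
+	 * A nonzero module parameter overrides the ACPI/DT value; otherwise
+	 * the firmware value is copied back into the parameter so it is
+	 * visible under /sys/module.  The same pattern is used for the other
+	 * limits below.
+	 */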
+	if (max_write_request &&
+			(max_write_request != mgmtdev->max_write_request)) {
+		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
+			max_write_request);
+		mgmtdev->max_write_request = max_write_request;
+	} else
+		max_write_request = mgmtdev->max_write_request;
+
+	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
+				      &mgmtdev->max_read_request);
+	if (rc) {
+		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
+		goto out;
+	}
+	if (max_read_request &&
+			(max_read_request != mgmtdev->max_read_request)) {
+		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
+			max_read_request);
+		mgmtdev->max_read_request = max_read_request;
+	} else
+		max_read_request = mgmtdev->max_read_request;
+
+	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
+				      &mgmtdev->max_wr_xactions);
+	if (rc) {
+		dev_err(&pdev->dev, "max-write-transactions missing\n");
+		goto out;
+	}
+	if (max_wr_xactions &&
+			(max_wr_xactions != mgmtdev->max_wr_xactions)) {
+		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
+			max_wr_xactions);
+		mgmtdev->max_wr_xactions = max_wr_xactions;
+	} else
+		max_wr_xactions = mgmtdev->max_wr_xactions;
+
+	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
+				      &mgmtdev->max_rd_xactions);
+	if (rc) {
+		dev_err(&pdev->dev, "max-read-transactions missing\n");
+		goto out;
+	}
+	if (max_rd_xactions &&
+			(max_rd_xactions != mgmtdev->max_rd_xactions)) {
+		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
+			max_rd_xactions);
+		mgmtdev->max_rd_xactions = max_rd_xactions;
+	} else
+		max_rd_xactions = mgmtdev->max_rd_xactions;
+
+	mgmtdev->priority = devm_kcalloc(&pdev->dev,
+					 mgmtdev->dma_channels,
+					 sizeof(*mgmtdev->priority),
+					 GFP_KERNEL);
+	if (!mgmtdev->priority) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	mgmtdev->weight = devm_kcalloc(&pdev->dev,
+				       mgmtdev->dma_channels,
+				       sizeof(*mgmtdev->weight), GFP_KERNEL);
+	if (!mgmtdev->weight) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = hidma_mgmt_setup(mgmtdev);
+	if (rc) {
+		dev_err(&pdev->dev, "setup failed\n");
+		goto out;
+	}
+
+	/* start the HW */
+	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+	val |= 1;
+	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+
+	rc = hidma_mgmt_init_sys(mgmtdev);
+	if (rc) {
+		dev_err(&pdev->dev, "sysfs setup failed\n");
+		goto out;
+	}
+
+	dev_info(&pdev->dev,
+		 "HW rev: %d.%d @ %pa with %d physical channels\n",
+		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
+		 &res->start, mgmtdev->dma_channels);
+
+	platform_set_drvdata(pdev, mgmtdev);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+	return 0;
+out:
+	pm_runtime_put_sync_suspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return rc;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
+	{"QCOM8060"},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
+#endif
+
+static const struct of_device_id hidma_mgmt_match[] = {
+	{.compatible = "qcom,hidma-mgmt-1.0",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
+
+static struct platform_driver hidma_mgmt_driver = {
+	.probe = hidma_mgmt_probe,
+	.driver = {
+		   .name = "hidma-mgmt",
+		   .of_match_table = hidma_mgmt_match,
+		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
+	},
+};
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+static int object_counter;
+
+static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
+{
+	struct platform_device *pdev_parent = of_find_device_by_node(np);
+	struct platform_device_info pdevinfo;
+	struct device_node *child;
+	struct resource *res;
+	int ret = 0;
+
+	/* allocate a resource array */
+	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
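+	/*
+	 * The same resource array is reused for every child node;
+	 * platform_device_register_full() copies the resources into each
+	 * newly registered platform device.
+	 */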
+	for_each_available_child_of_node(np, child) {
+		struct platform_device *new_pdev;
+
+		ret = of_address_to_resource(child, 0, &res[0]);
+		if (ret)
+			goto out;
+
+		ret = of_address_to_resource(child, 1, &res[1]);
+		if (ret)
+			goto out;
+
+		ret = of_irq_to_resource(child, 0, &res[2]);
+		if (ret <= 0)
+			goto out;
+
+		memset(&pdevinfo, 0, sizeof(pdevinfo));
+		pdevinfo.fwnode = &child->fwnode;
+		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
+		pdevinfo.name = child->name;
+		pdevinfo.id = object_counter++;
+		pdevinfo.res = res;
+		pdevinfo.num_res = 3;
+		pdevinfo.data = NULL;
+		pdevinfo.size_data = 0;
+		pdevinfo.dma_mask = DMA_BIT_MASK(64);
+		new_pdev = platform_device_register_full(&pdevinfo);
+		if (IS_ERR(new_pdev)) {
+			ret = PTR_ERR(new_pdev);
+			goto out;
+		}
+		of_node_get(child);
+		new_pdev->dev.of_node = child;
+		of_dma_configure(&new_pdev->dev, child, true);
+		/*
+		 * It is assumed that calling of_msi_configure is safe on
+		 * platforms with or without MSI support.
+		 */
+		of_msi_configure(&new_pdev->dev, child);
+		of_node_put(child);
+	}
+out:
+	kfree(res);
+
+	return ret;
+}
+#endif
+
+static int __init hidma_mgmt_init(void)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+	struct device_node *child;
+
+	for_each_matching_node(child, hidma_mgmt_match) {
+		/* instantiate the HIDMA channel devices described in the DT */
+		hidma_mgmt_of_populate_channels(child);
+	}
+#endif
+	platform_driver_register(&hidma_mgmt_driver);
+
+	return 0;
+}
+module_init(hidma_mgmt_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 0000000..f7daf33
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
+/*
+ * Qualcomm Technologies HIDMA Management common header
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+struct hidma_mgmt_dev {
+	u8 hw_version_major;
+	u8 hw_version_minor;
+
+	u32 max_wr_xactions;
+	u32 max_rd_xactions;
+	u32 max_write_request;
+	u32 max_read_request;
+	u32 dma_channels;
+	u32 chreset_timeout_cycles;
+	u32 hw_version;
+	u32 *priority;
+	u32 *weight;
+
+	/* Hardware device constants */
+	void __iomem *virtaddr;
+	resource_size_t addrsize;
+
+	struct kobject **chroots;
+	struct platform_device *pdev;
+};
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 0000000..cbb89ea
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,293 @@
+/*
+ * Qualcomm Technologies HIDMA Management SYS interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "hidma_mgmt.h"
+
+struct hidma_chan_attr {
+	struct hidma_mgmt_dev *mdev;
+	int index;
+	struct kobj_attribute attr;
+};
+
+struct hidma_mgmt_fileinfo {
+	char *name;
+	int mode;
+	int (*get)(struct hidma_mgmt_dev *mdev);
+	int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
+};
+
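+/*
+ * Generate trivial get_<name>()/set_<name>() accessors for a hidma_mgmt_dev
+ * field.  The setter re-runs hidma_mgmt_setup() and rolls the field back if
+ * the new value is rejected.
+ */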
+#define IMPLEMENT_GETSET(name)					\
+static int get_##name(struct hidma_mgmt_dev *mdev)		\
+{								\
+	return mdev->name;					\
+}								\
+static int set_##name(struct hidma_mgmt_dev *mdev, u64 val)	\
+{								\
+	u64 tmp;						\
+	int rc;							\
+								\
+	tmp = mdev->name;					\
+	mdev->name = val;					\
+	rc = hidma_mgmt_setup(mdev);				\
+	if (rc)							\
+		mdev->name = tmp;				\
+	return rc;						\
+}
+
+#define DECLARE_ATTRIBUTE(name, mode)				\
+	{#name, mode, get_##name, set_##name}
+
+IMPLEMENT_GETSET(hw_version_major)
+IMPLEMENT_GETSET(hw_version_minor)
+IMPLEMENT_GETSET(max_wr_xactions)
+IMPLEMENT_GETSET(max_rd_xactions)
+IMPLEMENT_GETSET(max_write_request)
+IMPLEMENT_GETSET(max_read_request)
+IMPLEMENT_GETSET(dma_channels)
+IMPLEMENT_GETSET(chreset_timeout_cycles)
+
+static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+	u64 tmp;
+	int rc;
+
+	if (i >= mdev->dma_channels)
+		return -EINVAL;
+
+	tmp = mdev->priority[i];
+	mdev->priority[i] = val;
+	rc = hidma_mgmt_setup(mdev);
+	if (rc)
+		mdev->priority[i] = tmp;
+	return rc;
+}
+
+static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+	u64 tmp;
+	int rc;
+
+	if (i >= mdev->dma_channels)
+		return -EINVAL;
+
+	tmp = mdev->weight[i];
+	mdev->weight[i] = val;
+	rc = hidma_mgmt_setup(mdev);
+	if (rc)
+		mdev->weight[i] = tmp;
+	return rc;
+}
+
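+/*
+ * Device-level sysfs attributes; show_values() and set_values() look the
+ * matching entry up by attribute name.
+ */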
+static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
+	DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
+	DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
+	DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
+	DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
+	DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
+	DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
+	DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
+	DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
+};
+
+static ssize_t show_values(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
+	unsigned int i;
+
+	buf[0] = 0;
+
+	for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+		if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+			sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
+			break;
+		}
+	}
+	return strlen(buf);
+}
+
+static ssize_t set_values(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
+	unsigned long tmp;
+	unsigned int i;
+	int rc;
+
+	rc = kstrtoul(buf, 0, &tmp);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+		if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+			rc = hidma_mgmt_files[i].set(mdev, tmp);
+			if (rc)
+				return rc;
+
+			break;
+		}
+	}
+	return count;
+}
+
+static ssize_t show_values_channel(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	struct hidma_chan_attr *chattr;
+	struct hidma_mgmt_dev *mdev;
+
+	buf[0] = 0;
+	chattr = container_of(attr, struct hidma_chan_attr, attr);
+	mdev = chattr->mdev;
+	if (strcmp(attr->attr.name, "priority") == 0)
+		sprintf(buf, "%d\n", mdev->priority[chattr->index]);
+	else if (strcmp(attr->attr.name, "weight") == 0)
+		sprintf(buf, "%d\n", mdev->weight[chattr->index]);
+
+	return strlen(buf);
+}
+
+static ssize_t set_values_channel(struct kobject *kobj,
+				  struct kobj_attribute *attr, const char *buf,
+				  size_t count)
+{
+	struct hidma_chan_attr *chattr;
+	struct hidma_mgmt_dev *mdev;
+	unsigned long tmp;
+	int rc;
+
+	chattr = container_of(attr, struct hidma_chan_attr, attr);
+	mdev = chattr->mdev;
+
+	rc = kstrtoul(buf, 0, &tmp);
+	if (rc)
+		return rc;
+
+	if (strcmp(attr->attr.name, "priority") == 0) {
+		rc = set_priority(mdev, chattr->index, tmp);
+		if (rc)
+			return rc;
+	} else if (strcmp(attr->attr.name, "weight") == 0) {
+		rc = set_weight(mdev, chattr->index, tmp);
+		if (rc)
+			return rc;
+	}
+	return count;
+}
+
+static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+{
+	struct device_attribute *attrs;
+	char *name_copy;
+
+	attrs = devm_kmalloc(&dev->pdev->dev,
+			     sizeof(struct device_attribute), GFP_KERNEL);
+	if (!attrs)
+		return -ENOMEM;
+
+	name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
+	if (!name_copy)
+		return -ENOMEM;
+
+	attrs->attr.name = name_copy;
+	attrs->attr.mode = mode;
+	attrs->show = show_values;
+	attrs->store = set_values;
+	sysfs_attr_init(&attrs->attr);
+
+	return device_create_file(&dev->pdev->dev, attrs);
+}
+
+static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
+				      int mode, int index,
+				      struct kobject *parent)
+{
+	struct hidma_chan_attr *chattr;
+	char *name_copy;
+
+	chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
+	if (!chattr)
+		return -ENOMEM;
+
+	name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
+	if (!name_copy)
+		return -ENOMEM;
+
+	chattr->mdev = mdev;
+	chattr->index = index;
+	chattr->attr.attr.name = name_copy;
+	chattr->attr.attr.mode = mode;
+	chattr->attr.show = show_values_channel;
+	chattr->attr.store = set_values_channel;
+	sysfs_attr_init(&chattr->attr.attr);
+
+	return sysfs_create_file(parent, &chattr->attr.attr);
+}
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
+{
+	unsigned int i;
+	int rc;
+	int required;
+	struct kobject *chanops;
+
+	required = sizeof(*mdev->chroots) * mdev->dma_channels;
+	mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
+	if (!mdev->chroots)
+		return -ENOMEM;
+
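+	/*
+	 * "chanops" groups the per-channel directories (chanops/chanN) that
+	 * hold the writable priority and weight attributes.
+	 */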
+	chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
+	if (!chanops)
+		return -ENOMEM;
+
+	/* create each channel directory here */
+	for (i = 0; i < mdev->dma_channels; i++) {
+		char name[20];
+
+		snprintf(name, sizeof(name), "chan%d", i);
+		mdev->chroots[i] = kobject_create_and_add(name, chanops);
+		if (!mdev->chroots[i])
+			return -ENOMEM;
+	}
+
+	/* populate common parameters */
+	for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+		rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
+					hidma_mgmt_files[i].mode);
+		if (rc)
+			return rc;
+	}
+
+	/* populate parameters that are per channel */
+	for (i = 0; i < mdev->dma_channels; i++) {
+		rc = create_sysfs_entry_channel(mdev, "priority",
+						(S_IRUGO | S_IWUGO), i,
+						mdev->chroots[i]);
+		if (rc)
+			return rc;
+
+		rc = create_sysfs_entry_channel(mdev, "weight",
+						(S_IRUGO | S_IWUGO), i,
+						mdev->chroots[i]);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);