v4.19.13 snapshot.
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644
index 0000000..8f80e8b
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
@@ -0,0 +1,31 @@
+#
+# NXP/Freescale QorIQ series SOC drivers
+#
+
+menu "NXP/Freescale QorIQ SoC drivers"
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+	bool
+	select SOC_BUS
+	help
+	  The global utilities block controls power management, I/O device
+	  enabling, power-on reset (POR) configuration monitoring, alternate
+	  function selection for multiplexed signals, and clock control.
+	  This driver manages and provides access to the global utilities
+	  block. Initially, only reading the SVR and registering the SoC
+	  device are supported. Other GUTS accesses, such as reading the
+	  RCW, should eventually be moved into this driver as well.
+
+config FSL_MC_DPIO
+	tristate "QorIQ DPAA2 DPIO driver"
+	depends on FSL_MC_BUS
+	help
+	  Driver for the DPAA2 DPIO object.  A DPIO provides queue and
+	  buffer management facilities for software to interact with
+	  other DPAA2 objects. This driver does not expose the DPIO
+	  objects individually, but groups them under a service layer
+	  API.
+endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644
index 0000000..803ef1b
--- /dev/null
+++ b/drivers/soc/fsl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_FSL_DPAA)			+= qbman/
+obj-$(CONFIG_QUICC_ENGINE)		+= qe/
+obj-$(CONFIG_CPM)			+= qe/
+obj-$(CONFIG_FSL_GUTS)			+= guts.o
+obj-$(CONFIG_FSL_MC_DPIO)	+= dpio/
diff --git a/drivers/soc/fsl/dpio/Makefile b/drivers/soc/fsl/dpio/Makefile
new file mode 100644
index 0000000..b9ff24c
--- /dev/null
+++ b/drivers/soc/fsl/dpio/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# QorIQ DPAA2 DPIO driver
+#
+
+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
+
+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
new file mode 100644
index 0000000..ab8f82e
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR			4
+#define DPIO_VER_MINOR			2
+
+/* Command Versioning */
+
+#define DPIO_CMD_ID_OFFSET		4
+#define DPIO_CMD_BASE_VERSION		1
+
+#define DPIO_CMD(id)	(((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
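+/* e.g. DPIO_CMDID_OPEN below encodes as (0x803 << 4) | 1 = 0x8031 */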
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE				DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN					DPIO_CMD(0x803)
+#define DPIO_CMDID_GET_API_VERSION			DPIO_CMD(0xa03)
+#define DPIO_CMDID_ENABLE				DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE				DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR				DPIO_CMD(0x004)
+
+struct dpio_cmd_open {
+	__le32 dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_MASK		0x3
+
+struct dpio_rsp_get_attr {
+	/* cmd word 0 */
+	__le32 id;
+	__le16 qbman_portal_id;
+	u8 num_priorities;
+	u8 channel_mode;
+	/* cmd word 1 */
+	__le64 qbman_portal_ce_addr;
+	/* cmd word 2 */
+	__le64 qbman_portal_ci_addr;
+	/* cmd word 3 */
+	__le32 qbman_version;
+};
+
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
new file mode 100644
index 0000000..b60b77b
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP 2016
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "qbman-portal.h"
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("DPIO Driver");
+
+struct dpio_priv {
+	struct dpaa2_io *io;
+};
+
+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+{
+	struct device *dev = (struct device *)arg;
+	struct dpio_priv *priv = dev_get_drvdata(dev);
+
+	return dpaa2_io_irq(priv->io);
+}
+
+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
+{
+	struct fsl_mc_device_irq *irq;
+
+	irq = dpio_dev->irqs[0];
+
+	/* clear the affinity hint */
+	irq_set_affinity_hint(irq->msi_desc->irq, NULL);
+}
+
+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+{
+	struct dpio_priv *priv;
+	int error;
+	struct fsl_mc_device_irq *irq;
+	cpumask_t mask;
+
+	priv = dev_get_drvdata(&dpio_dev->dev);
+
+	irq = dpio_dev->irqs[0];
+	error = devm_request_irq(&dpio_dev->dev,
+				 irq->msi_desc->irq,
+				 dpio_irq_handler,
+				 0,
+				 dev_name(&dpio_dev->dev),
+				 &dpio_dev->dev);
+	if (error < 0) {
+		dev_err(&dpio_dev->dev,
+			"devm_request_irq() failed: %d\n",
+			error);
+		return error;
+	}
+
+	/* set the affinity hint */
+	cpumask_clear(&mask);
+	cpumask_set_cpu(cpu, &mask);
+	if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
+		dev_err(&dpio_dev->dev,
+			"irq_set_affinity failed irq %d cpu %d\n",
+			irq->msi_desc->irq, cpu);
+
+	return 0;
+}
+
+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
+{
+	struct dpio_attr dpio_attrs;
+	struct dpaa2_io_desc desc;
+	struct dpio_priv *priv;
+	int err = -ENOMEM;
+	struct device *dev = &dpio_dev->dev;
+	static int next_cpu = -1;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		goto err_priv_alloc;
+
+	dev_set_drvdata(dev, priv);
+
+	err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+	if (err) {
+		dev_dbg(dev, "MC portal allocation failed\n");
+		err = -EPROBE_DEFER;
+		goto err_priv_alloc;
+	}
+
+	err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+			&dpio_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpio_open() failed\n");
+		goto err_open;
+	}
+
+	err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
+				  &dpio_attrs);
+	if (err) {
+		dev_err(dev, "dpio_get_attributes() failed %d\n", err);
+		goto err_get_attr;
+	}
+	desc.qman_version = dpio_attrs.qbman_version;
+
+	err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpio_enable() failed %d\n", err);
+		goto err_get_attr;
+	}
+
+	/* initialize DPIO descriptor */
+	desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
+	desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
+	desc.dpio_id = dpio_dev->obj_desc.id;
+
+	/* get the cpu to use for the affinity hint */
+	if (next_cpu == -1)
+		next_cpu = cpumask_first(cpu_online_mask);
+	else
+		next_cpu = cpumask_next(next_cpu, cpu_online_mask);
+
+	if (!cpu_possible(next_cpu)) {
+		dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
+		err = -ERANGE;
+		goto err_allocate_irqs;
+	}
+	desc.cpu = next_cpu;
+
+	/*
+	 * Set the CENA regs to be the cache inhibited area of the portal to
+	 * avoid coherency issues if a user migrates to another core.
+	 */
+	desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+				       resource_size(&dpio_dev->regions[1]),
+				       MEMREMAP_WC);
+	if (IS_ERR(desc.regs_cena)) {
+		dev_err(dev, "devm_memremap failed\n");
+		err = PTR_ERR(desc.regs_cena);
+		goto err_allocate_irqs;
+	}
+
+	desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+				      resource_size(&dpio_dev->regions[1]));
+	if (!desc.regs_cinh) {
+		err = -ENOMEM;
+		dev_err(dev, "devm_ioremap failed\n");
+		goto err_allocate_irqs;
+	}
+
+	err = fsl_mc_allocate_irqs(dpio_dev);
+	if (err) {
+		dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
+		goto err_allocate_irqs;
+	}
+
+	err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+	if (err)
+		goto err_register_dpio_irq;
+
+	priv->io = dpaa2_io_create(&desc);
+	if (!priv->io) {
+		dev_err(dev, "dpaa2_io_create failed\n");
+		err = -ENOMEM;
+		goto err_dpaa2_io_create;
+	}
+
+	dev_info(dev, "probed\n");
+	dev_dbg(dev, "   receives_notifications = %d\n",
+		desc.receives_notifications);
+	dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+	fsl_mc_portal_free(dpio_dev->mc_io);
+
+	return 0;
+
+err_dpaa2_io_create:
+	unregister_dpio_irq_handlers(dpio_dev);
+err_register_dpio_irq:
+	fsl_mc_free_irqs(dpio_dev);
+err_allocate_irqs:
+	dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_get_attr:
+	dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_open:
+	fsl_mc_portal_free(dpio_dev->mc_io);
+err_priv_alloc:
+	return err;
+}
+
+/* Tear down interrupts for a given DPIO object */
+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
+{
+	unregister_dpio_irq_handlers(dpio_dev);
+	fsl_mc_free_irqs(dpio_dev);
+}
+
+static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
+{
+	struct device *dev;
+	struct dpio_priv *priv;
+	int err;
+
+	dev = &dpio_dev->dev;
+	priv = dev_get_drvdata(dev);
+
+	dpaa2_io_down(priv->io);
+
+	dpio_teardown_irqs(dpio_dev);
+
+	err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+	if (err) {
+		dev_err(dev, "MC portal allocation failed\n");
+		goto err_mcportal;
+	}
+
+	err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+			&dpio_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpio_open() failed\n");
+		goto err_open;
+	}
+
+	dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+	dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+	fsl_mc_portal_free(dpio_dev->mc_io);
+
+	return 0;
+
+err_open:
+	fsl_mc_portal_free(dpio_dev->mc_io);
+err_mcportal:
+	return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dpio",
+	},
+	{ .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_dpio_driver = {
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= dpaa2_dpio_probe,
+	.remove		= dpaa2_dpio_remove,
+	.match_id_table = dpaa2_dpio_match_id_table
+};
+
+static int dpio_driver_init(void)
+{
+	return fsl_mc_driver_register(&dpaa2_dpio_driver);
+}
+
+static void dpio_driver_exit(void)
+{
+	fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+}
+module_init(dpio_driver_init);
+module_exit(dpio_driver_exit);
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
new file mode 100644
index 0000000..9b17f72
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/types.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "dpio.h"
+#include "qbman-portal.h"
+
+struct dpaa2_io {
+	struct dpaa2_io_desc dpio_desc;
+	struct qbman_swp_desc swp_desc;
+	struct qbman_swp *swp;
+	struct list_head node;
+	/* protect against multiple management commands */
+	spinlock_t lock_mgmt_cmd;
+	/* protect notifications list */
+	spinlock_t lock_notifications;
+	struct list_head notifications;
+};
+
+struct dpaa2_io_store {
+	unsigned int max;
+	dma_addr_t paddr;
+	struct dpaa2_dq *vaddr;
+	void *alloced_addr;    /* unaligned value from kmalloc() */
+	unsigned int idx;      /* position of the next-to-be-returned entry */
+	struct qbman_swp *swp; /* portal used to issue VDQCR */
+	struct device *dev;    /* device used for DMA mapping */
+};
+
+/* keep a per cpu array of DPIOs for fast access */
+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static DEFINE_SPINLOCK(dpio_list_lock);
+
+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
+						     int cpu)
+{
+	if (d)
+		return d;
+
+	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
+		return NULL;
+
+	/*
+	 * If cpu == DPAA2_IO_ANY_CPU (-1), choose the current cpu, with no
+	 * guarantee that the caller will not subsequently be migrated away.
+	 */
+	if (unlikely(cpu < 0))
+		cpu = smp_processor_id();
+
+	/* If a specific cpu was requested, pick it up immediately */
+	return dpio_by_cpu[cpu];
+}
+
+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
+{
+	if (d)
+		return d;
+
+	spin_lock(&dpio_list_lock);
+	d = list_entry(dpio_list.next, struct dpaa2_io, node);
+	list_del(&d->node);
+	list_add_tail(&d->node, &dpio_list);
+	spin_unlock(&dpio_list_lock);
+
+	return d;
+}
+
+/**
+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
+ * @cpu: the cpu id
+ *
+ * Return the affine dpaa2_io service, or NULL if there is no service affined
+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
+ * service.
+ */
+struct dpaa2_io *dpaa2_io_service_select(int cpu)
+{
+	if (cpu == DPAA2_IO_ANY_CPU)
+		return service_select(NULL);
+
+	return service_select_by_cpu(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
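+
+/*
+ * Illustrative caller-side sketch (not part of this driver): prefer the
+ * service affine to the current cpu and fall back to any available one.
+ *
+ *	struct dpaa2_io *io = dpaa2_io_service_select(smp_processor_id());
+ *
+ *	if (!io)
+ *		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
+ */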
+
+/**
+ * dpaa2_io_create() - create a dpaa2_io object.
+ * @desc: the dpaa2_io descriptor
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
+ *
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+{
+	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+
+	if (!obj)
+		return NULL;
+
+	/* check if CPU is out of range (-1 means any cpu) */
+	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
+		kfree(obj);
+		return NULL;
+	}
+
+	obj->dpio_desc = *desc;
+	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
+	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+	obj->swp = qbman_swp_init(&obj->swp_desc);
+
+	if (!obj->swp) {
+		kfree(obj);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&obj->node);
+	spin_lock_init(&obj->lock_mgmt_cmd);
+	spin_lock_init(&obj->lock_notifications);
+	INIT_LIST_HEAD(&obj->notifications);
+
+	/* For now only enable DQRR interrupts */
+	qbman_swp_interrupt_set_trigger(obj->swp,
+					QBMAN_SWP_INTERRUPT_DQRI);
+	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
+	if (obj->dpio_desc.receives_notifications)
+		qbman_swp_push_set(obj->swp, 0, 1);
+
+	spin_lock(&dpio_list_lock);
+	list_add_tail(&obj->node, &dpio_list);
+	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
+		dpio_by_cpu[desc->cpu] = obj;
+	spin_unlock(&dpio_list_lock);
+
+	return obj;
+}
+
+/**
+ * dpaa2_io_down() - release the dpaa2_io object.
+ * @d: the dpaa2_io object to be released.
+ *
+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
+ * each handle obtained should be released using this function.
+ */
+void dpaa2_io_down(struct dpaa2_io *d)
+{
+	kfree(d);
+}
+
+#define DPAA_POLL_MAX 32
+
+/**
+ * dpaa2_io_irq() - ISR for DPIO interrupts
+ *
+ * @obj: the given DPIO object.
+ *
+ * Return IRQ_HANDLED for success or IRQ_NONE if there
+ * were no pending interrupts.
+ */
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
+{
+	const struct dpaa2_dq *dq;
+	int max = 0;
+	struct qbman_swp *swp;
+	u32 status;
+
+	swp = obj->swp;
+	status = qbman_swp_interrupt_read_status(swp);
+	if (!status)
+		return IRQ_NONE;
+
+	dq = qbman_swp_dqrr_next(swp);
+	while (dq) {
+		if (qbman_result_is_SCN(dq)) {
+			struct dpaa2_io_notification_ctx *ctx;
+			u64 q64;
+
+			q64 = qbman_result_SCN_ctx(dq);
+			ctx = (void *)(uintptr_t)q64;
+			ctx->cb(ctx);
+		} else {
+			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
+		}
+		qbman_swp_dqrr_consume(swp, dq);
+		++max;
+		if (max > DPAA_POLL_MAX)
+			goto done;
+		dq = qbman_swp_dqrr_next(swp);
+	}
+done:
+	qbman_swp_interrupt_clear_status(swp, status);
+	qbman_swp_interrupt_set_inhibit(swp, 0);
+	return IRQ_HANDLED;
+}
+
+/**
+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
+ *                               notifications on the given DPIO service.
+ * @d:   the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully.  In that way:
+ *    (a) The DPIO service is "ready" to handle a notification arrival
+ *        (which might happen before the "attach" command to MC has
+ *        returned control of execution back to the caller)
+ *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
+ *        'qman64' parameters that it should pass along in the MC command
+ *        in order for the object to be configured to produce the right
+ *        notification fields to the DPIO service.
+ *
+ * Return 0 for success, or -ENODEV for failure.
+ */
+int dpaa2_io_service_register(struct dpaa2_io *d,
+			      struct dpaa2_io_notification_ctx *ctx)
+{
+	unsigned long irqflags;
+
+	d = service_select_by_cpu(d, ctx->desired_cpu);
+	if (!d)
+		return -ENODEV;
+
+	ctx->dpio_id = d->dpio_desc.dpio_id;
+	ctx->qman64 = (u64)(uintptr_t)ctx;
+	ctx->dpio_private = d;
+	spin_lock_irqsave(&d->lock_notifications, irqflags);
+	list_add(&ctx->node, &d->notifications);
+	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+	/* Enable the generation of CDAN notifications */
+	if (ctx->is_cdan)
+		return qbman_swp_CDAN_set_context_enable(d->swp,
+							 (u16)ctx->id,
+							 ctx->qman64);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
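+
+/*
+ * A sketch of the register-then-attach flow described above. The callback,
+ * "fqid" and the MC "attach" step belong to the consumer driver and are
+ * assumed here; only the dpaa2_io_* calls are provided by this service.
+ *
+ *	static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+ *	{
+ *		... schedule a pull dequeue from the now-disarmed FQ ...
+ *	}
+ *
+ *	ctx->cb = my_fqdan_cb;
+ *	ctx->is_cdan = 0;
+ *	ctx->id = fqid;
+ *	ctx->desired_cpu = DPAA2_IO_ANY_CPU;
+ *	err = dpaa2_io_service_register(NULL, ctx);
+ *	if (err)
+ *		return err;
+ *	... pass ctx->dpio_id and ctx->qman64 in the MC "attach" command ...
+ */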
+
+/**
+ * dpaa2_io_service_deregister - The opposite of 'register'.
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * This function should be called only after sending the MC command to
+ * detach the notification-producing device from the DPIO.
+ */
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+				 struct dpaa2_io_notification_ctx *ctx)
+{
+	struct dpaa2_io *d = ctx->dpio_private;
+	unsigned long irqflags;
+
+	if (ctx->is_cdan)
+		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
+
+	spin_lock_irqsave(&d->lock_notifications, irqflags);
+	list_del(&ctx->node);
+	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
+
+/**
+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
+ * considered "disarmed", i.e. the user can issue pull dequeue operations on
+ * that traffic source for as long as it likes. Eventually it may wish to
+ * "rearm" that source to allow it to produce another FQDAN/CDAN; that is
+ * what this function achieves.
+ *
+ * Return 0 for success.
+ */
+int dpaa2_io_service_rearm(struct dpaa2_io *d,
+			   struct dpaa2_io_notification_ctx *ctx)
+{
+	unsigned long irqflags;
+	int err;
+
+	d = service_select_by_cpu(d, ctx->desired_cpu);
+	if (unlikely(!d))
+		return -ENODEV;
+
+	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+	if (ctx->is_cdan)
+		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
+	else
+		err = qbman_swp_fq_schedule(d->swp, ctx->id);
+	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
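+
+/*
+ * Usage sketch: once the FQ/channel registered above has been drained,
+ * re-arm it so that it can produce another notification ("ctx" as set up
+ * for dpaa2_io_service_register()):
+ *
+ *	err = dpaa2_io_service_rearm(NULL, ctx);
+ */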
+
+/**
+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
+ * @d: the given DPIO service.
+ * @channelid: the given channel id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+				  struct dpaa2_io_store *s)
+{
+	struct qbman_pull_desc pd;
+	int err;
+
+	qbman_pull_desc_clear(&pd);
+	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	s->swp = d->swp;
+	err = qbman_swp_pull(d->swp, &pd);
+	if (err)
+		s->swp = NULL;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
+				u32 qdid, u8 prio, u16 qdbin,
+				const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc ed;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	qbman_eq_desc_clear(&ed);
+	qbman_eq_desc_set_no_orp(&ed, 0);
+	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+
+	return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
+
+/**
+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffers to be released.
+ * @num_buffers: the number of the buffers to be released.
+ *
+ * Return 0 for success, and negative error code for failure.
+ */
+int dpaa2_io_service_release(struct dpaa2_io *d,
+			     u32 bpid,
+			     const u64 *buffers,
+			     unsigned int num_buffers)
+{
+	struct qbman_release_desc rd;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	qbman_release_desc_clear(&rd);
+	qbman_release_desc_set_bpid(&rd, bpid);
+
+	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
+
+/**
+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffer addresses for acquired buffers.
+ * @num_buffers: the expected number of the buffers to acquire.
+ *
+ * Return a negative error code if the command failed, otherwise it returns
+ * the number of buffers acquired, which may be less than the number requested.
+ * Eg. if the buffer pool is empty, this will return zero.
+ */
+int dpaa2_io_service_acquire(struct dpaa2_io *d,
+			     u32 bpid,
+			     u64 *buffers,
+			     unsigned int num_buffers)
+{
+	unsigned long irqflags;
+	int err;
+
+	d = service_select(d);
+	if (!d)
+		return -ENODEV;
+
+	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
+	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
+
+/*
+ * 'Stores' are reusable memory blocks for holding dequeue results and for
+ * assisting with parsing those results.
+ */
+
+/**
+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
+ * @max_frames: the maximum number of dequeue results to store; must be 1-16.
+ * @dev:        the device to allow mapping/unmapping the DMAable region.
+ *
+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
+ *
+ * Return pointer to dpaa2_io_store struct for successfully created storage
+ * memory, or NULL on error.
+ */
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+					     struct device *dev)
+{
+	struct dpaa2_io_store *ret;
+	size_t size;
+
+	if (!max_frames || (max_frames > 16))
+		return NULL;
+
+	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->max = max_frames;
+	size = max_frames * sizeof(struct dpaa2_dq) + 64;
+	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
+	if (!ret->alloced_addr) {
+		kfree(ret);
+		return NULL;
+	}
+
+	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
+	ret->paddr = dma_map_single(dev, ret->vaddr,
+				    sizeof(struct dpaa2_dq) * max_frames,
+				    DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, ret->paddr)) {
+		kfree(ret->alloced_addr);
+		kfree(ret);
+		return NULL;
+	}
+
+	ret->idx = 0;
+	ret->dev = dev;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
+
+/**
+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
+ *                            result.
+ * @s: the storage memory to be destroyed.
+ */
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
+{
+	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
+			 DMA_FROM_DEVICE);
+	kfree(s->alloced_addr);
+	kfree(s);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
+
+/**
+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
+ * @s: the dpaa2_io_store object.
+ * @is_last: indicate whether this is the last frame in the pull command.
+ *
+ * When an object driver performs dequeues to a dpaa2_io_store, this function
+ * can be used to determine when the next frame result is available. Once
+ * this function returns non-NULL, a subsequent call to it will try to find
+ * the next dequeue result.
+ *
+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
+ * was empty, then this function will also return NULL (rather than expecting
+ * the caller to always check for this). As such, "is_last" can be used to
+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
+ *
+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
+ */
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
+{
+	int match;
+	struct dpaa2_dq *ret = &s->vaddr[s->idx];
+
+	match = qbman_result_has_new_result(s->swp, ret);
+	if (!match) {
+		*is_last = 0;
+		return NULL;
+	}
+
+	s->idx++;
+
+	if (dpaa2_dq_is_pull_complete(ret)) {
+		*is_last = 1;
+		s->idx = 0;
+		/*
+		 * If we get an empty dequeue result to terminate a zero-results
+		 * vdqcr, return NULL to the caller rather than expecting the
+		 * caller to check non-NULL results every time.
+		 */
+		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+			ret = NULL;
+	} else {
+		*is_last = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
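+
+/*
+ * Putting the store API together -- an illustrative polling sequence,
+ * assuming a channel id and device obtained elsewhere ("channelid", "dev")
+ * and eliding error handling:
+ *
+ *	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
+ *	struct dpaa2_dq *dq;
+ *	int is_last = 0;
+ *
+ *	dpaa2_io_service_pull_channel(NULL, channelid, s);
+ *	do {
+ *		dq = dpaa2_io_store_next(s, &is_last);
+ *		if (dq)
+ *			... process dpaa2_dq_fd(dq) ...
+ *	} while (!is_last);
+ *	dpaa2_io_store_destroy(s);
+ */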
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
new file mode 100644
index 0000000..ff37c80
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+/*
+ * Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id:	DPIO unique ID
+ * @token:	Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+	      u32 cmd_flags,
+	      int dpio_id,
+	      u16 *token)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpio_cmd_open *dpio_cmd;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+					  cmd_flags,
+					  0);
+	dpio_cmd = (struct dpio_cmd_open *)cmd.params;
+	dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	*token = mc_cmd_hdr_read_token(&cmd);
+
+	return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPIO object
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+					  cmd_flags,
+					  token);
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPIO object
+ *
+ * Return:	'0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+					  cmd_flags,
+					  token);
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPIO object
+ *
+ * Return:	'0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+					  cmd_flags,
+					  token);
+
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPIO object
+ * @attr:	Returned object's attributes
+ *
+ * Return:	'0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpio_attr *attr)
+{
+	struct fsl_mc_command cmd = { 0 };
+	struct dpio_rsp_get_attr *dpio_rsp;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+					  cmd_flags,
+					  token);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
+	attr->id = le32_to_cpu(dpio_rsp->id);
+	attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
+	attr->num_priorities = dpio_rsp->num_priorities;
+	attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
+	attr->qbman_portal_ce_offset =
+		le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
+	attr->qbman_portal_ci_offset =
+		le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
+	attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+
+	return 0;
+}
+
+/**
+ * dpio_get_api_version - Get Data Path I/O API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of DPIO API
+ * @minor_ver:	Minor version of DPIO API
+ *
+ * Return:	'0' on Success; Error code otherwise
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct fsl_mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+	return 0;
+}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
new file mode 100644
index 0000000..49194c8
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io	*mc_io,
+	      u32		cmd_flags,
+	      int		dpio_id,
+	      u16		*token);
+
+int dpio_close(struct fsl_mc_io	*mc_io,
+	       u32		cmd_flags,
+	       u16		token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ *	dedicated channel in the DPIO; user should point the queue's
+ *	destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+	DPIO_NO_CHANNEL = 0,
+	DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ *			relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+	enum dpio_channel_mode	channel_mode;
+	u8		num_priorities;
+};
+
+int dpio_enable(struct fsl_mc_io	*mc_io,
+		u32		cmd_flags,
+		u16		token);
+
+int dpio_disable(struct fsl_mc_io	*mc_io,
+		 u32		cmd_flags,
+		 u16		token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ *			relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+	int			id;
+	u64		qbman_portal_ce_offset;
+	u64		qbman_portal_ci_offset;
+	u16		qbman_portal_id;
+	enum dpio_channel_mode	channel_mode;
+	u8			num_priorities;
+	u32		qbman_version;
+};
+
+int dpio_get_attributes(struct fsl_mc_io	*mc_io,
+			u32		cmd_flags,
+			u16		token,
+			struct dpio_attr	*attr);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
new file mode 100644
index 0000000..cf1d448
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <soc/fsl/dpaa2-global.h>
+
+#include "qbman-portal.h"
+
+#define QMAN_REV_4000   0x04000000
+#define QMAN_REV_4100   0x04010000
+#define QMAN_REV_4101   0x04010001
+#define QMAN_REV_MASK   0xffff0000
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE       0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQAR    0x8c0
+#define QBMAN_CINH_SWP_DQPI    0xa00
+#define QBMAN_CINH_SWP_DCAP    0xac0
+#define QBMAN_CINH_SWP_SDQCR   0xb00
+#define QBMAN_CINH_SWP_RAR     0xcc0
+#define QBMAN_CINH_SWP_ISR     0xe00
+#define QBMAN_CINH_SWP_IER     0xe40
+#define QBMAN_CINH_SWP_ISDR    0xe80
+#define QBMAN_CINH_SWP_IIR     0xec0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR      0x600
+#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR   0x780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
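+/* e.g. DQRR entry 3 sits at CENA offset 0x2c0, and (0x2c0 & 0x1ff) >> 6 = 3 */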
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT   29
+#define QB_SDQCR_FC_MASK    0x1
+#define QB_SDQCR_DCT_SHIFT  24
+#define QB_SDQCR_DCT_MASK   0x3
+#define QB_SDQCR_TOK_SHIFT  16
+#define QB_SDQCR_TOK_MASK   0xff
+#define QB_SDQCR_SRC_SHIFT  0
+#define QB_SDQCR_SRC_MASK   0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN    0xbb
+
+enum qbman_sdqcr_dct {
+	qbman_sdqcr_dct_null = 0,
+	qbman_sdqcr_dct_prio_ics,
+	qbman_sdqcr_dct_active_ics,
+	qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+	qbman_sdqcr_fc_one = 0,
+	qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+	return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+					u32 value)
+{
+	writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+	return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG   0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT     16
+#define SWP_CFG_WN_SHIFT      14
+#define SWP_CFG_RPM_SHIFT     12
+#define SWP_CFG_DCM_SHIFT     10
+#define SWP_CFG_EPM_SHIFT     8
+#define SWP_CFG_SD_SHIFT      5
+#define SWP_CFG_SP_SHIFT      4
+#define SWP_CFG_SE_SHIFT      3
+#define SWP_CFG_DP_SHIFT      2
+#define SWP_CFG_DE_SHIFT      1
+#define SWP_CFG_EP_SHIFT      0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
+				    u8 epm, int sd, int sp, int se,
+				    int dp, int de, int ep)
+{
+	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+		est << SWP_CFG_EST_SHIFT |
+		wn << SWP_CFG_WN_SHIFT |
+		rpm << SWP_CFG_RPM_SHIFT |
+		dcm << SWP_CFG_DCM_SHIFT |
+		epm << SWP_CFG_EPM_SHIFT |
+		sd << SWP_CFG_SD_SHIFT |
+		sp << SWP_CFG_SP_SHIFT |
+		se << SWP_CFG_SE_SHIFT |
+		dp << SWP_CFG_DP_SHIFT |
+		de << SWP_CFG_DE_SHIFT |
+		ep << SWP_CFG_EP_SHIFT);
+}
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ *                    QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	u32 reg;
+
+	if (!p)
+		return NULL;
+	p->desc = d;
+	p->mc.valid_bit = QB_VALID_BIT;
+	p->sdq = 0;
+	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+	atomic_set(&p->vdq.available, 1);
+	p->vdq.valid_bit = QB_VALID_BIT;
+	p->dqrr.next_idx = 0;
+	p->dqrr.valid_bit = QB_VALID_BIT;
+
+	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+		p->dqrr.dqrr_size = 4;
+		p->dqrr.reset_bug = 1;
+	} else {
+		p->dqrr.dqrr_size = 8;
+		p->dqrr.reset_bug = 0;
+	}
+
+	p->addr_cena = d->cena_bar;
+	p->addr_cinh = d->cinh_bar;
+
+	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+				1, /* Writes Non-cacheable */
+				0, /* EQCR_CI stashing threshold */
+				3, /* RPM: Valid bit mode, RCR in array mode */
+				2, /* DCM: Discrete consumption ack mode */
+				3, /* EPM: Valid bit mode, EQCR in array mode */
+				0, /* mem stashing drop enable == FALSE */
+				1, /* mem stashing priority == TRUE */
+				0, /* mem stashing enable == FALSE */
+				1, /* dequeue stashing priority == TRUE */
+				0, /* dequeue stashing enable == FALSE */
+				0); /* EQCR_CI stashing priority == FALSE */
+
+	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+	if (!reg) {
+		pr_err("qbman: the portal is not enabled!\n");
+		kfree(p);
+		return NULL;
+	}
+
+	/*
+	 * SDQCR needs to be initialized to 0 when no channels are
+	 * being dequeued from or else the QMan HW will indicate an
+	 * error.  The values that were calculated above will be
+	 * applied when dequeues from a specific channel are enabled.
+	 */
+	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+	return p;
+}
+
+/**
+ * qbman_swp_finish() - Create and destroy a functional object representing
+ *                      the given QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+	kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status()
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status()
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: whether to inhibit the portal interrupts
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+	return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+}
+
+/*
+ * Merges in the caller-supplied command verb (which should not include
+ * the valid-bit) and submits the command to hardware.
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+	u8 *v = cmd;
+
+	dma_wmb();
+	*v = cmd_verb | p->mc.valid_bit;
+}
+
+/*
+ * Checks for a completed response (returns non-NULL if and only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+	u32 *ret, verb;
+
+	ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+
+	/* Remove the valid-bit - command completed if the rest is non-zero */
+	verb = ret[0] & ~QB_VALID_BIT;
+	if (!verb)
+		return NULL;
+	p->mc.valid_bit ^= QB_VALID_BIT;
+	return ret;
+}
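+
+/*
+ * Typical polling sequence built on the three helpers above (a sketch;
+ * qbman_swp_acquire() later in this file is a concrete user, and CMD_VERB
+ * stands in for the command-specific verb code):
+ *
+ *	cmd = qbman_swp_mc_start(p);
+ *	if (!cmd)
+ *		return -EBUSY;
+ *	... fill in the command body, leaving the verb byte alone ...
+ *	qbman_swp_mc_submit(p, cmd, CMD_VERB);
+ *	do {
+ *		rslt = qbman_swp_mc_result(p);
+ *	} while (!rslt);
+ */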
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
+enum qb_enqueue_commands {
+	enqueue_empty = 0,
+	enqueue_response_always = 1,
+	enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ *                         default/starting state.
+ * @d: the given enqueue descriptor
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d:               the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ *                   rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+	if (respond_success)
+		d->verb |= enqueue_response_always;
+	else
+		d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ *   -enqueue to a frame queue
+ *   -enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d:    the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+	d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d:       the enqueue descriptor
+ * @qdid:    the id of the queuing destination to be enqueued
+ * @qd_bin:  the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+			  u32 qd_bin, u32 qd_prio)
+{
+	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+	d->tgtid = cpu_to_le32(qdid);
+	d->qdbin = cpu_to_le16(qd_bin);
+	d->qpri = qd_prio;
+}
+
+#define EQAR_IDX(eqar)     ((eqar) & 0x7)
+#define EQAR_VB(eqar)      ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s:  the software portal used for enqueue
+ * @d:  the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+		      const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc *p;
+	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+
+	if (!EQAR_SUCCESS(eqar))
+		return -EBUSY;
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+	memcpy(&p->dca, &d->dca, 31);
+	memcpy(&p->fd, fd, sizeof(*fd));
+
+	/* Set the verb byte, have to substitute in the valid-bit */
+	dma_wmb();
+	p->verb = d->verb | EQAR_VB(eqar);
+
+	return 0;
+}
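+
+/*
+ * Illustrative FQ enqueue using the descriptor API above ("fqid" and "fd"
+ * are assumed to come from the caller; see dpaa2_io_service_enqueue_qd()
+ * in dpio-service.c for the QD variant):
+ *
+ *	struct qbman_eq_desc ed;
+ *
+ *	qbman_eq_desc_clear(&ed);
+ *	qbman_eq_desc_set_no_orp(&ed, 0);
+ *	qbman_eq_desc_set_fq(&ed, fqid);
+ *	if (qbman_swp_enqueue(s, &ed, fd) == -EBUSY)
+ *		... EQCR full, back off and retry ...
+ */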
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s:           the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled:     returned boolean to show whether the push dequeue is enabled
+ *               for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+	WARN_ON(channel_idx > 15);
+	*enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s:           the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable:      enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+	u16 dqsrc;
+
+	WARN_ON(channel_idx > 15);
+	if (enable)
+		s->sdq |= 1 << channel_idx;
+	else
+		s->sdq &= ~(1 << channel_idx);
+
+	/* Rebuild the complete src map. If no channels are enabled,
+	 * the SDQCR must be 0 or else QMan will assert errors
+	 */
+	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+	if (dqsrc != 0)
+		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+	else
+		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
+
+#define QB_VDQCR_VERB_DCT_SHIFT    0
+#define QB_VDQCR_VERB_DT_SHIFT     2
+#define QB_VDQCR_VERB_RLS_SHIFT    4
+#define QB_VDQCR_VERB_WAE_SHIFT    5
+
+enum qb_pull_dt_e {
+	qb_pull_dt_channel,
+	qb_pull_dt_workqueue,
+	qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ *                           default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d:            the pull dequeue descriptor to be set
+ * @storage:      the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash:        to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, the resulting pull
+ * dequeues will produce results to DQRR.
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+				 struct dpaa2_dq *storage,
+				 dma_addr_t storage_phys,
+				 int stash)
+{
+	/* save the virtual address */
+	d->rsp_addr_virt = (u64)(uintptr_t)storage;
+
+	if (!storage) {
+		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+		return;
+	}
+	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+	if (stash)
+		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+	else
+		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+	d->rsp_addr = cpu_to_le64(storage_phys);
+}
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d:         the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+	d->numf = numframes - 1;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct:  the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+			    enum qbman_pull_type_e dct)
+{
+	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ *                                 dequeues
+ * @d:    the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct:  the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+				 enum qbman_pull_type_e dct)
+{
+	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+	d->dq_src = cpu_to_le32(chid);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ *     the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+	struct qbman_pull_desc *p;
+
+	if (!atomic_dec_and_test(&s->vdq.available)) {
+		atomic_inc(&s->vdq.available);
+		return -EBUSY;
+	}
+	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+	p->numf = d->numf;
+	p->tok = QMAN_DQ_TOKEN_VALID;
+	p->dq_src = d->dq_src;
+	p->rsp_addr = d->rsp_addr;
+	p->rsp_addr_virt = d->rsp_addr_virt;
+	dma_wmb();
+
+	/* Set the verb byte, have to substitute in the valid-bit */
+	p->verb = d->verb | s->vdq.valid_bit;
+	s->vdq.valid_bit ^= QB_VALID_BIT;
+
+	return 0;
+}
+
+#define QMAN_DQRR_PI_MASK   0xf
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+	u32 verb;
+	u32 response_verb;
+	u32 flags;
+	struct dpaa2_dq *p;
+
+	/* Before using valid-bit to detect if something is there, we have to
+	 * handle the case of the DQRR reset bug...
+	 */
+	if (unlikely(s->dqrr.reset_bug)) {
+		/*
+		 * We pick up new entries by cache-inhibited producer index,
+		 * which means that a non-coherent mapping would require us to
+		 * invalidate and read *only* once that PI has indicated that
+		 * there's an entry here. The first trip around the DQRR ring
+		 * will be much less efficient than all subsequent trips around
+		 * it...
+		 */
+		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+			QMAN_DQRR_PI_MASK;
+
+		/* there are new entries if pi != next_idx */
+		if (pi == s->dqrr.next_idx)
+			return NULL;
+
+		/*
+		 * if next_idx is/was the last ring index, and 'pi' is
+		 * different, we can disable the workaround as all the ring
+		 * entries have now been DMA'd to so valid-bit checking is
+		 * repaired. Note: this logic needs to be based on next_idx
+		 * (which increments one at a time), rather than on pi (which
+		 * can burst and wrap-around between our snapshots of it).
+		 */
+		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+				 s->dqrr.next_idx, pi);
+			s->dqrr.reset_bug = 0;
+		}
+		prefetch(qbman_get_cmd(s,
+				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+	}
+
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+	verb = p->dq.verb;
+
+	/*
+	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
+	 * in the DQRR reset bug workaround, we shouldn't need to skip these
+	 * in the DQRR reset bug workaround, we shouldn't need to skip this
+	 * and we've invalidated the cacheline before reading it, so the
+	 * valid-bit behaviour is repaired and should tell us what we already
+	 * knew from reading PI.
+	 */
+	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+		prefetch(qbman_get_cmd(s,
+				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+		return NULL;
+	}
+	/*
+	 * There's something there. Move "next_idx" attention to the next ring
+	 * entry (and prefetch it) before returning what we found.
+	 */
+	s->dqrr.next_idx++;
+	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+	if (!s->dqrr.next_idx)
+		s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+	/*
+	 * If this is the final response to a volatile dequeue command
+	 * indicate that the vdq is available
+	 */
+	flags = p->dq.stat;
+	response_verb = verb & QBMAN_RESULT_MASK;
+	if ((response_verb == QBMAN_RESULT_DQ) &&
+	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
+	    (flags & DPAA2_DQ_STAT_EXPIRED))
+		atomic_inc(&s->vdq.available);
+
+	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+	return p;
+}
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ *                             qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
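+
+/*
+ * Sketch of the intended next/consume pairing (dpaa2_io_irq() in
+ * dpio-service.c is an in-tree user):
+ *
+ *	const struct dpaa2_dq *dq;
+ *
+ *	while ((dq = qbman_swp_dqrr_next(s))) {
+ *		... process the entry, e.g. check qbman_result_is_SCN(dq) ...
+ *		qbman_swp_dqrr_consume(s, dq);
+ *	}
+ */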
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ *                                 dq storage memory set in pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+		return 0;
+
+	/*
+	 * Set the token to 0 so that we detect the change back to a valid
+	 * token the next time this location is polled. Const is cast away
+	 * here as we want users to treat the dequeue responses as read-only.
+	 */
+	((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+	/*
+	 * Determine whether VDQCR is available based on whether the
+	 * current result is sitting in the first storage location of
+	 * the busy command.
+	 */
+	if (s->vdq.storage == dq) {
+		s->vdq.storage = NULL;
+		atomic_inc(&s->vdq.available);
+	}
+
+	return 1;
+}
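+
+/*
+ * A minimal usage sketch (illustrative, assuming a volatile dequeue was
+ * already issued with qbman_swp_pull()): poll the user-provided storage
+ * until the response lands. 'storage' is the dpaa2_dq array previously
+ * passed to qbman_pull_desc_set_storage().
+ *
+ *	while (!qbman_result_has_new_result(s, storage))
+ *		cpu_relax();
+ *	... 'storage' now holds a host-endian dequeue result ...
+ */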
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ *                              default/starting state
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+	memset(d, 0, sizeof(*d));
+	d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d:    the release descriptor
+ * @bpid: the ID of the buffer pool
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+	d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determine whether or not the portal's RCDI
+ *                                 interrupt source should be asserted after
+ *                                 the release command is completed
+ * @d:      the release descriptor
+ * @enable: enable (1) or disable (0) the RCDI interrupt
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+	if (enable)
+		d->verb |= 1 << 6;
+	else
+		d->verb &= ~(1 << 6);
+}
+
+#define RAR_IDX(rar)     ((rar) & 0x7)
+#define RAR_VB(rar)      ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s:           the software portal object
+ * @d:           the release descriptor
+ * @buffers:     a pointer to an array of the buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EINVAL if the buffer count is out of range, or
+ * -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+		      const u64 *buffers, unsigned int num_buffers)
+{
+	int i;
+	struct qbman_release_desc *p;
+	u32 rar;
+
+	if (!num_buffers || (num_buffers > 7))
+		return -EINVAL;
+
+	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+	if (!RAR_SUCCESS(rar))
+		return -EBUSY;
+
+	/* Start the release command */
+	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+	/* Copy the caller's buffer pointers to the command */
+	for (i = 0; i < num_buffers; i++)
+		p->buf[i] = cpu_to_le64(buffers[i]);
+	p->bpid = d->bpid;
+
+	/*
+	 * Set the verb byte, have to substitute in the valid-bit and the number
+	 * of buffers.
+	 */
+	dma_wmb();
+	p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+	return 0;
+}
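+
+/*
+ * A minimal usage sketch (illustrative only): release two buffers to pool
+ * 'bpid', retrying while the RCR ring is busy. 's', 'bpid', 'addr0' and
+ * 'addr1' are assumed inputs.
+ *
+ *	struct qbman_release_desc rd;
+ *	u64 bufs[2] = { addr0, addr1 };
+ *	int err;
+ *
+ *	qbman_release_desc_clear(&rd);
+ *	qbman_release_desc_set_bpid(&rd, bpid);
+ *	do {
+ *		err = qbman_swp_release(s, &rd, bufs, 2);
+ *	} while (err == -EBUSY);
+ */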
+
+struct qbman_acquire_desc {
+	u8 verb;
+	u8 reserved;
+	__le16 bpid;
+	u8 num;
+	u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+	u8 verb;
+	u8 rslt;
+	__le16 reserved;
+	u8 num;
+	u8 reserved2[3];
+	__le64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s:           the software portal object
+ * @bpid:        the buffer pool index
+ * @buffers:     a pointer to an array that receives the acquired buffer
+ *               addresses
+ * @num_buffers: number of buffers to be acquired, must be less than 8
+ *
+ * Return the number of buffers acquired, or a negative error code if the
+ * acquire command fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+		      unsigned int num_buffers)
+{
+	struct qbman_acquire_desc *p;
+	struct qbman_acquire_rslt *r;
+	int i;
+
+	if (!num_buffers || (num_buffers > 7))
+		return -EINVAL;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+
+	if (!p)
+		return -EBUSY;
+
+	/* Encode the caller-provided attributes */
+	p->bpid = cpu_to_le16(bpid);
+	p->num = num_buffers;
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+	if (unlikely(!r)) {
+		pr_err("qbman: acquire from BPID %d failed, no response\n",
+		       bpid);
+		return -EIO;
+	}
+
+	/* Decode the outcome */
+	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+		       bpid, r->rslt);
+		return -EIO;
+	}
+
+	WARN_ON(r->num > num_buffers);
+
+	/* Copy the acquired buffers to the caller's array */
+	for (i = 0; i < r->num; i++)
+		buffers[i] = le64_to_cpu(r->buf[i]);
+
+	return (int)r->num;
+}
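+
+/*
+ * A minimal usage sketch (illustrative only): drain up to 7 buffers from
+ * pool 'bpid'; a positive return is the count actually acquired, which may
+ * be less than requested if the pool is running low.
+ *
+ *	u64 bufs[7];
+ *	int n = qbman_swp_acquire(s, bpid, bufs, 7);
+ *
+ *	if (n > 0)
+ *		... bufs[0..n-1] hold valid buffer addresses ...
+ */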
+
+struct qbman_alt_fq_state_desc {
+	u8 verb;
+	u8 reserved[3];
+	__le32 fqid;
+	u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+	u8 verb;
+	u8 rslt;
+	u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+			   u8 alt_fq_verb)
+{
+	struct qbman_alt_fq_state_desc *p;
+	struct qbman_alt_fq_state_rslt *r;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+	if (!p)
+		return -EBUSY;
+
+	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+	if (unlikely(!r)) {
+		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+		       alt_fq_verb);
+		return -EIO;
+	}
+
+	/* Decode the outcome */
+	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+		       fqid, r->verb, r->rslt);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+	u8 verb;
+	u8 reserved;
+	__le16 ch;
+	u8 we;
+	u8 ctrl;
+	__le16 reserved2;
+	__le64 cdan_ctx;
+	u8 reserved3[48];
+};
+
+struct qbman_cdan_ctrl_rslt {
+	u8 verb;
+	u8 rslt;
+	__le16 ch;
+	u8 reserved[60];
+};
+
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+		       u8 we_mask, u8 cdan_en,
+		       u64 ctx)
+{
+	struct qbman_cdan_ctrl_desc *p = NULL;
+	struct qbman_cdan_ctrl_rslt *r = NULL;
+
+	/* Start the management command */
+	p = qbman_swp_mc_start(s);
+	if (!p)
+		return -EBUSY;
+
+	/* Encode the caller-provided attributes */
+	p->ch = cpu_to_le16(channelid);
+	p->we = we_mask;
+	if (cdan_en)
+		p->ctrl = 1;
+	else
+		p->ctrl = 0;
+	p->cdan_ctx = cpu_to_le64(ctx);
+
+	/* Complete the management command */
+	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+	if (unlikely(!r)) {
+		pr_err("qbman: wqchan config failed, no response\n");
+		return -EIO;
+	}
+
+	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+	/* Determine success or failure */
+	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+		       channelid, r->rslt);
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
new file mode 100644
index 0000000..89d1dd9
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include <soc/fsl/dpaa2-fd.h>
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+	void *cena_bar; /* Cache-enabled portal base address */
+	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
+	u32 qman_version;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+	u8 verb;
+	u8 numf;
+	u8 tok;
+	u8 reserved;
+	__le32 dq_src;
+	__le64 rsp_addr;
+	u64 rsp_addr_virt;
+	u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+	/* dequeue with priority precedence, respect intra-class scheduling */
+	qbman_pull_type_prio = 1,
+	/* dequeue with active FQ precedence, respect ICS */
+	qbman_pull_type_active,
+	/* dequeue with active FQ precedence, no ICS */
+	qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK      0x7f
+#define QBMAN_RESULT_DQ        0x60
+#define QBMAN_RESULT_FQRN      0x21
+#define QBMAN_RESULT_FQRNI     0x22
+#define QBMAN_RESULT_FQPN      0x24
+#define QBMAN_RESULT_FQDAN     0x25
+#define QBMAN_RESULT_CDAN      0x26
+#define QBMAN_RESULT_CSCN_MEM  0x27
+#define QBMAN_RESULT_CGCU      0x28
+#define QBMAN_RESULT_BPSCN     0x29
+#define QBMAN_RESULT_CSCN_WQ   0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE	0x48
+#define QBMAN_FQ_FORCE		0x49
+#define QBMAN_FQ_XON		0x4d
+#define QBMAN_FQ_XOFF		0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+	u8 verb;
+	u8 dca;
+	__le16 seqnum;
+	__le16 orpid;
+	__le16 reserved1;
+	__le32 tgtid;
+	__le32 tag;
+	__le16 qdbin;
+	u8 qpri;
+	u8 reserved[3];
+	u8 wae;
+	u8 rspid;
+	__le64 rsp_addr;
+	u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+	u8 verb;
+	u8 reserved;
+	__le16 bpid;
+	__le32 reserved2;
+	__le64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK      0xf0
+
+#define CODE_CDAN_WE_EN    0x1
+#define CODE_CDAN_WE_CTX   0x4
+
+/* portal data structure */
+struct qbman_swp {
+	const struct qbman_swp_desc *desc;
+	void *addr_cena;
+	void __iomem *addr_cinh;
+
+	/* Management commands */
+	struct {
+		u32 valid_bit; /* 0x00 or 0x80 */
+	} mc;
+
+	/* Push dequeues */
+	u32 sdq;
+
+	/* Volatile dequeues */
+	struct {
+		atomic_t available; /* indicates if a command can be sent */
+		u32 valid_bit; /* 0x00 or 0x80 */
+		struct dpaa2_dq *storage; /* NULL if DQRR */
+	} vdq;
+
+	/* DQRR */
+	struct {
+		u32 next_idx;
+		u32 valid_bit;
+		u8 dqrr_size;
+		int reset_bug; /* indicates dqrr reset workaround is needed */
+	} dqrr;
+};
+
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+				 struct dpaa2_dq *storage,
+				 dma_addr_t storage_phys,
+				 int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+			    enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+				 enum qbman_pull_type_e dct);
+
+int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+			  u32 qd_bin, u32 qd_prio);
+
+int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
+		      const struct dpaa2_fd *fd);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+		      const u64 *buffers, unsigned int num_buffers);
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+		      unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+			   u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+		       u8 we_mask, u8 cdan_en,
+		       u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a
+ *                         state-change notification
+ * @dq: the dequeue result to be checked
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+	return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+/* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
+
+/**
+ * qbman_result_SCN_state() - Get the state field in a state-change
+ *                            notification
+ * @scn: the state-change notification
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+	return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource ID in a state-change notification
+ * @scn: the state-change notification
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in a state-change
+ *                          notification
+ * @scn: the state-change notification
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+	return le64_to_cpu(scn->scn.ctx);
+}
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s:    the software portal object
+ * @fqid: the index of frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s:    the software portal object
+ * @fqid: the index of frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s:    the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s:    the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
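+
+/*
+ * A sketch of that flow (illustrative only), reacting to a CDAN for
+ * channel 'chid' and rearming with a fresh context in a single hardware
+ * command; 'chid' and 'new_ctx' are assumed inputs:
+ *
+ *	... CDAN for 'chid' arrives, perform the pull dequeue(s) ...
+ *	qbman_swp_CDAN_set_context_enable(s, chid, new_ctx);
+ */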
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s:         the software portal object
+ * @channelid: the channel index
+ * @ctx:       the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+					     u64 ctx)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_CTX,
+				  0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN,
+				  1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN,
+				  0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s:         the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx:       the context set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+						    u16 channelid,
+						    u64 ctx)
+{
+	return qbman_swp_CDAN_set(s, channelid,
+				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+				  1, ctx);
+}
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+					  u8 cmd_verb)
+{
+	int loopvar = 1000;
+
+	qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+	do {
+		cmd = qbman_swp_mc_result(swp);
+	} while (!cmd && loopvar--);
+
+	WARN_ON(!loopvar);
+
+	return cmd;
+}
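+
+/*
+ * Illustrative pattern (a sketch, not a new API): every management command
+ * in qbman-portal.c follows the same three steps.
+ *
+ *	cmd = qbman_swp_mc_start(s);               claim the command register
+ *	... encode command-specific fields into cmd ...
+ *	rsp = qbman_swp_mc_complete(s, cmd, verb); submit and poll for result
+ */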
+
+#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644
index 0000000..302e0c8
--- /dev/null
+++ b/drivers/soc/fsl/guts.c
@@ -0,0 +1,248 @@
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct guts {
+	struct ccsr_guts __iomem *regs;
+	bool little_endian;
+};
+
+struct fsl_soc_die_attr {
+	char	*die;
+	u32	svr;
+	u32	mask;
+};
+
+static struct guts *guts;
+static struct soc_device_attribute soc_dev_attr;
+static struct soc_device *soc_dev;
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+	/*
+	 * Power Architecture-based SoCs T Series
+	 */
+
+	/* Die: T4240, SoC: T4240/T4160/T4080 */
+	{ .die		= "T4240",
+	  .svr		= 0x82400000,
+	  .mask		= 0xfff00000,
+	},
+	/* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+	{ .die		= "T1040",
+	  .svr		= 0x85200000,
+	  .mask		= 0xfff00000,
+	},
+	/* Die: T2080, SoC: T2080/T2081 */
+	{ .die		= "T2080",
+	  .svr		= 0x85300000,
+	  .mask		= 0xfff00000,
+	},
+	/* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+	{ .die		= "T1024",
+	  .svr		= 0x85400000,
+	  .mask		= 0xfff00000,
+	},
+
+	/*
+	 * ARM-based SoCs LS Series
+	 */
+
+	/* Die: LS1043A, SoC: LS1043A/LS1023A */
+	{ .die		= "LS1043A",
+	  .svr		= 0x87920000,
+	  .mask		= 0xffff0000,
+	},
+	/* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+	{ .die		= "LS2080A",
+	  .svr		= 0x87010000,
+	  .mask		= 0xff3f0000,
+	},
+	/* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+	{ .die		= "LS1088A",
+	  .svr		= 0x87030000,
+	  .mask		= 0xff3f0000,
+	},
+	/* Die: LS1012A, SoC: LS1012A */
+	{ .die		= "LS1012A",
+	  .svr		= 0x87040000,
+	  .mask		= 0xffff0000,
+	},
+	/* Die: LS1046A, SoC: LS1046A/LS1026A */
+	{ .die		= "LS1046A",
+	  .svr		= 0x87070000,
+	  .mask		= 0xffff0000,
+	},
+	/* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+	{ .die		= "LS2088A",
+	  .svr		= 0x87090000,
+	  .mask		= 0xff3f0000,
+	},
+	/* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+	{ .die		= "LS1021A",
+	  .svr		= 0x87000000,
+	  .mask		= 0xfff70000,
+	},
+	{ },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+	u32 svr, const struct fsl_soc_die_attr *matches)
+{
+	while (matches->svr) {
+		if (matches->svr == (svr & matches->mask))
+			return matches;
+		matches++;
+	}
+	return NULL;
+}
+
+u32 fsl_guts_get_svr(void)
+{
+	u32 svr = 0;
+
+	if (!guts || !guts->regs)
+		return svr;
+
+	if (guts->little_endian)
+		svr = ioread32(&guts->regs->svr);
+	else
+		svr = ioread32be(&guts->regs->svr);
+
+	return svr;
+}
+EXPORT_SYMBOL(fsl_guts_get_svr);
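+
+/*
+ * An illustrative sketch (not part of the driver) of how the SVR is decoded
+ * below: the die is matched via 'svr & mask' against the table above, and
+ * the silicon revision is carried in the low byte as major.minor nibbles.
+ *
+ *	u32 svr = fsl_guts_get_svr();
+ *	const struct fsl_soc_die_attr *die =
+ *		fsl_soc_die_match(svr, fsl_soc_die);
+ *
+ *	pr_info("die %s rev %d.%d\n", die ? die->die : "unknown",
+ *		(svr >> 4) & 0xf, svr & 0xf);
+ */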
+
+static int fsl_guts_probe(struct platform_device *pdev)
+{
+	struct device_node *root, *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	const struct fsl_soc_die_attr *soc_die;
+	const char *machine = NULL;
+	u32 svr;
+
+	/* Initialize guts */
+	guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
+	if (!guts)
+		return -ENOMEM;
+
+	guts->little_endian = of_property_read_bool(np, "little-endian");
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	guts->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(guts->regs))
+		return PTR_ERR(guts->regs);
+
+	/* Register soc device */
+	root = of_find_node_by_path("/");
+	if (of_property_read_string(root, "model", &machine))
+		of_property_read_string_index(root, "compatible", 0, &machine);
+	of_node_put(root);
+	if (machine)
+		soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+
+	svr = fsl_guts_get_svr();
+	soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+	if (soc_die) {
+		soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
+						     "QorIQ %s", soc_die->die);
+	} else {
+		soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
+	}
+	if (!soc_dev_attr.family)
+		return -ENOMEM;
+	soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
+					     "svr:0x%08x", svr);
+	if (!soc_dev_attr.soc_id)
+		return -ENOMEM;
+	soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+					       (svr >>  4) & 0xf, svr & 0xf);
+	if (!soc_dev_attr.revision)
+		return -ENOMEM;
+
+	soc_dev = soc_device_register(&soc_dev_attr);
+	if (IS_ERR(soc_dev))
+		return PTR_ERR(soc_dev);
+
+	pr_info("Machine: %s\n", soc_dev_attr.machine);
+	pr_info("SoC family: %s\n", soc_dev_attr.family);
+	pr_info("SoC ID: %s, Revision: %s\n",
+		soc_dev_attr.soc_id, soc_dev_attr.revision);
+	return 0;
+}
+
+static int fsl_guts_remove(struct platform_device *dev)
+{
+	soc_device_unregister(soc_dev);
+	return 0;
+}
+
+/*
+ * Table for matching compatible strings, for device tree
+ * guts node, for Freescale QorIQ SOCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+	{ .compatible = "fsl,qoriq-device-config-1.0", },
+	{ .compatible = "fsl,qoriq-device-config-2.0", },
+	{ .compatible = "fsl,p1010-guts", },
+	{ .compatible = "fsl,p1020-guts", },
+	{ .compatible = "fsl,p1021-guts", },
+	{ .compatible = "fsl,p1022-guts", },
+	{ .compatible = "fsl,p1023-guts", },
+	{ .compatible = "fsl,p2020-guts", },
+	{ .compatible = "fsl,bsc9131-guts", },
+	{ .compatible = "fsl,bsc9132-guts", },
+	{ .compatible = "fsl,mpc8536-guts", },
+	{ .compatible = "fsl,mpc8544-guts", },
+	{ .compatible = "fsl,mpc8548-guts", },
+	{ .compatible = "fsl,mpc8568-guts", },
+	{ .compatible = "fsl,mpc8569-guts", },
+	{ .compatible = "fsl,mpc8572-guts", },
+	{ .compatible = "fsl,ls1021a-dcfg", },
+	{ .compatible = "fsl,ls1043a-dcfg", },
+	{ .compatible = "fsl,ls2080a-dcfg", },
+	{ .compatible = "fsl,ls1088a-dcfg", },
+	{ .compatible = "fsl,ls1012a-dcfg", },
+	{ .compatible = "fsl,ls1046a-dcfg", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+
+static struct platform_driver fsl_guts_driver = {
+	.driver = {
+		.name = "fsl-guts",
+		.of_match_table = fsl_guts_of_match,
+	},
+	.probe = fsl_guts_probe,
+	.remove = fsl_guts_remove,
+};
+
+static int __init fsl_guts_init(void)
+{
+	return platform_driver_register(&fsl_guts_driver);
+}
+core_initcall(fsl_guts_init);
+
+static void __exit fsl_guts_exit(void)
+{
+	platform_driver_unregister(&fsl_guts_driver);
+}
+module_exit(fsl_guts_exit);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 0000000..d570cb5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+	bool "QorIQ DPAA1 framework support"
+	depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
+	select GENERIC_ALLOCATOR
+	help
+	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+	  hardware components on specific QorIQ multicore processors.
+	  This architecture provides the infrastructure to support simplified
+	  sharing of networking interfaces and accelerators by multiple CPUs.
+	  The major hardware blocks composing DPAA are BMan and QMan.
+
+	  The Buffer Manager (BMan) is a hardware buffer pool management block
+	  that allows software and accelerators on the datapath to acquire and
+	  release buffers in order to build frames.
+
+	  The Queue Manager (QMan) is a hardware queue management block
+	  that allows software and accelerators on the datapath to enqueue and
+	  dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+	bool "Additional driver checking"
+	help
+	  Compiles in additional checks, to sanity-check the drivers and
+	  any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+	tristate "BMan self-tests"
+	help
+	  Compile the BMan self-test code. These tests will
+	  exercise the BMan APIs to confirm functionality
+	  of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+	bool "High-level API self-test"
+	depends on FSL_BMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine
+	  to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+	tristate "QMan self-tests"
+	help
+	  Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+	bool "QMan high-level self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine to
+	  the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+	bool "QMan 'hot potato' data-stashing self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This performs a "hot potato" style test enqueuing/dequeuing a frame
+	  across a series of FQs scheduled to different portals (and cpus), with
+	  DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 0000000..811312a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o qman_ccsr.o \
+						   bman_portal.o qman_portal.o \
+						   bman.o qman.o dpaa_sys.o
+
+obj-$(CONFIG_FSL_BMAN_TEST)                     += bman-test.o
+bman-test-y                                      = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API)           += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST)			+= qman-test.o
+qman-test-y					 = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API)		+= qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH)		+= qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 0000000..f9485ce
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,821 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME		"BMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x3000
+#define BM_REG_RCR_CI_CINH	0x3100
+#define BM_REG_RCR_ITR		0x3200
+#define BM_REG_CFG		0x3300
+#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
+#define BM_REG_ISR		0x3e00
+#define BM_REG_IER		0x3e40
+#define BM_REG_ISDR		0x3e80
+#define BM_REG_IIR		0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+#else
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x0000
+#define BM_REG_RCR_CI_CINH	0x0004
+#define BM_REG_RCR_ITR		0x0008
+#define BM_REG_CFG		0x0100
+#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
+#define BM_REG_ISR		0x0e00
+#define BM_REG_IER		0x0e04
+#define BM_REG_ISDR		0x0e08
+#define BM_REG_IIR		0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+#endif
+
+/*
+ * Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
+	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
+	bm_rcr_pce = 1,		/* PI index, cache-enabled */
+	bm_rcr_pvb = 2		/* valid-bit */
+};
+enum bm_rcr_cmode {		/* s/w-only */
+	bm_rcr_cci,		/* CI index, cache-inhibited */
+	bm_rcr_cce		/* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE		8
+
+/* Release Command */
+struct bm_rcr_entry {
+	union {
+		struct {
+			u8 _ncw_verb; /* writes to this are non-coherent */
+			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+			u8 __reserved1[62];
+		};
+		struct bm_buffer bufs[8];
+	};
+};
+#define BM_RCR_VERB_VBIT		0x80
+#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
+
+struct bm_rcr {
+	struct bm_rcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum bm_rcr_pmode pmode;
+	enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 bpid; /* used by acquire command */
+	u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT		0x80
+#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE		0x10
+#define BM_MCC_VERB_CMD_QUERY		0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+	struct {
+		u8 verb;
+		u8 bpid;
+		u8 __reserved[62];
+	};
+	struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT		0x80
+#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
+#define BM_MCR_VERB_CMD_ERR_ECC		0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT			10000 /* us */
+
+struct bm_mc {
+	struct bm_mc_command *cr;
+	union bm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can only be _mc_start()ed */
+		mc_idle,
+		/* Can only be _mc_commit()ed or _mc_abort()ed */
+		mc_user,
+		/* Can only be _mc_retry()ed */
+		mc_hw
+	} state;
+#endif
+};
+
+struct bm_addr {
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* Same as above but for direct access */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct bm_portal {
+	struct bm_addr addr;
+	struct bm_rcr rcr;
+	struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+	return ioread32be(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+	iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+struct bman_portal {
+	struct bm_portal p;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	/* probing time config params for cpu-affine portals */
+	const struct bm_portal_config *config;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+	/* index of the buffer pool to encapsulate (0-63) */
+	u32 bpid;
+	/* Used for hash-table admin when using depletion notifications. */
+	struct bman_portal *portal;
+	struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct bman_portal *p = ptr;
+	struct bm_portal *portal = &p->p;
+	u32 clear = p->irq_sources;
+	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	clear |= poll_portal_slow(p, is);
+	bm_out(portal, BM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~RCR_CARRY;
+
+	return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+	/* increment to the next RCR pointer and handle overflow and 'vbit' */
+	struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+	rcr->cursor = rcr_carryclear(partial);
+	if (partial != rcr->cursor)
+		rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	rcr->ithresh = ithresh;
+	bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+
+	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+
+	DPAA_ASSERT(!rcr->busy);
+	if (!rcr->available)
+		return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 1;
+#endif
+	dpaa_zero(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	struct bm_rcr_entry *rcursor;
+
+	DPAA_ASSERT(rcr->busy);
+	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+	DPAA_ASSERT(rcr->available >= 1);
+	dma_wmb();
+	rcursor = rcr->cursor;
+	rcursor->_ncw_verb = myverb | rcr->vbit;
+	dpaa_flush(rcursor);
+	rcr_inc(rcr);
+	rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+		       enum bm_rcr_cmode cmode)
+{
+	struct bm_rcr *rcr = &portal->rcr;
+	u32 cfg;
+	u8 pi;
+
+	rcr->ring = portal->addr.ce + BM_CL_RCR;
+	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+		BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+		| (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(portal, BM_REG_CFG, cfg);
+	return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_rcr *rcr = &portal->rcr;
+	int i;
+
+	DPAA_ASSERT(!rcr->busy);
+
+	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr_ptr2idx(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+
+	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	if (i != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + BM_CL_CR;
+	mc->rr = portal->addr.ce + BM_CL_RR0;
+	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
+		    0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	struct bm_mc *mc = &portal->mc;
+	union bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!rr->verb) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+				       union bm_mc_result **mcr)
+{
+	int timeout = BM_MCR_TIMEOUT;
+
+	do {
+		*mcr = bm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+	bm_out(portal, BM_REG_SCN(0), 0);
+	bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+			      const struct bm_portal_config *c)
+{
+	struct bm_portal *p;
+	int ret;
+
+	p = &portal->p;
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config; everything that follows depends on it, and "config" is more
+	 * for (de)reference...
+	 */
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
+	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+		dev_err(c->dev, "RCR initialisation failed\n");
+		goto fail_rcr;
+	}
+	if (bm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/*
+	 * Default to all BPIDs disabled, we enable as required at
+	 * run-time.
+	 */
+	bm_isr_bscn_disable(p);
+
+	/* Write-to-clear any stale interrupt status bits */
+	bm_out(p, BM_REG_ISDR, 0xffffffff);
+	portal->irq_sources = 0;
+	bm_out(p, BM_REG_IER, 0);
+	bm_out(p, BM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need RCR to be empty before continuing */
+	ret = bm_rcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "RCR unclean\n");
+		goto fail_rcr_empty;
+	}
+	/* Success */
+	portal->config = c;
+
+	bm_out(p, BM_REG_ISDR, 0);
+	bm_out(p, BM_REG_IIR, 0);
+
+	return 0;
+
+fail_rcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	bm_mc_finish(p);
+fail_mc:
+	bm_rcr_finish(p);
+fail_rcr:
+	return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+	struct bman_portal *portal;
+	int err;
+
+	portal = &per_cpu(bman_affine_portal, c->cpu);
+	err = bman_create_portal(portal, c);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+	u32 ret = is;
+
+	if (is & BM_PIRQ_RCRI) {
+		bm_rcr_cce_update(&p->p);
+		bm_rcr_set_ithresh(&p->p, 0);
+		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+		is &= ~BM_PIRQ_RCRI;
+	}
+
+	/* There should be no status register bits left undefined */
+	DPAA_ASSERT(!is);
+	return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
+	bm_out(&p->p, BM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+	return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+	struct bm_mc_command *bm_cmd;
+	union bm_mc_result *bm_res;
+
+	while (1) {
+		struct bman_portal *p = get_affine_portal();
+		/* Acquire buffers until empty */
+		bm_cmd = bm_mc_start(&p->p);
+		bm_cmd->bpid = bpid;
+		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+			put_affine_portal();
+			pr_crit("BMan Acquire Command timedout\n");
+			return -ETIMEDOUT;
+		}
+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+			put_affine_portal();
+			/* Pool is empty */
+			return 0;
+		}
+		put_affine_portal();
+	}
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(bm_bpalloc, count);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+	int ret;
+
+	ret = bm_shutdown_pool(bpid);
+	if (ret) {
+		pr_debug("BPID %d leaked\n", bpid);
+		return ret;
+	}
+
+	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+	struct bman_pool *pool = NULL;
+	u32 bpid;
+
+	if (bm_alloc_bpid_range(&bpid, 1))
+		return NULL;
+
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto err;
+
+	pool->bpid = bpid;
+
+	return pool;
+err:
+	bm_release_bpid(bpid);
+	kfree(pool);
+	return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+	bm_release_bpid(pool->bpid);
+
+	kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+	return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+	if (avail)
+		bm_rcr_cce_prefetch(&p->p);
+	else
+		bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p;
+	struct bm_rcr_entry *r;
+	unsigned long irqflags;
+	int avail, timeout = 1000; /* 1ms */
+	int i = num - 1;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	do {
+		p = get_affine_portal();
+		local_irq_save(irqflags);
+		avail = bm_rcr_get_avail(&p->p);
+		if (avail < 2)
+			update_rcr_ci(p, avail);
+		r = bm_rcr_start(&p->p);
+		local_irq_restore(irqflags);
+		put_affine_portal();
+		if (likely(r))
+			break;
+
+		udelay(1);
+	} while (--timeout);
+
+	if (unlikely(!timeout))
+		return -ETIMEDOUT;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+	/*
+	 * we can copy all but the first entry, as this can trigger badness
+	 * with the valid-bit
+	 */
+	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+	bm_buffer_set_bpid(r->bufs, pool->bpid);
+	if (i)
+		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+			  (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_command *mcc;
+	union bm_mc_result *mcr;
+	int ret;
+
+	DPAA_ASSERT(num > 0 && num <= 8);
+
+	mcc = bm_mc_start(&p->p);
+	mcc->bpid = pool->bpid;
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+	if (!bm_mc_result_timeout(&p->p, &mcr)) {
+		put_affine_portal();
+		pr_crit("BMan Acquire Timeout\n");
+		return -ETIMEDOUT;
+	}
+	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+	if (bufs)
+		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+	put_affine_portal();
+	if (ret != num)
+		ret = -ENOMEM;
+	return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
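+
+/*
+ * A minimal pool round trip with the exported API (illustrative only);
+ * 'dma_addr' is an assumed DMA-mapped buffer address and error handling is
+ * elided. bm_buffer_set64() is the accessor used elsewhere in this file.
+ *
+ *	struct bman_pool *pool = bman_new_pool();
+ *	struct bm_buffer buf;
+ *
+ *	if (pool) {
+ *		bm_buffer_set64(&buf, dma_addr);
+ *		bman_release(pool, &buf, 1);
+ *		bman_acquire(pool, &buf, 1);
+ *		bman_free_pool(pool);
+ *	}
+ */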
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+	return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 0000000..7c3cc96
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,288 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_ERR_ISDR		0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
+	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
+	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low water mark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+	return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+		   is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+static int __bman_probed;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = bm_ccsr_in(REG_ERR_IER);
+	isr_val = bm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = bm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    bman_hwerr_txts[i].txt);
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				/* Re-arm error capture registers */
+				bm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+int bman_is_probed(void)
+{
+	return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+	int ret, err_irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	u16 id, bm_pool_cnt;
+	u8 major, minor;
+
+	__bman_probed = -1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+			node);
+		return -ENXIO;
+	}
+	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+	if (!bm_ccsr_start)
+		return -ENXIO;
+
+	bm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		bman_ip_rev = BMAN_REV10;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else if (major == 2 && minor == 0) {
+		bman_ip_rev = BMAN_REV20;
+		bm_pool_cnt = 8;
+	} else if (major == 2 && minor == 1) {
+		bman_ip_rev = BMAN_REV21;
+		bm_pool_cnt = BM_POOL_MAX;
+	} else {
+		dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+			id, major, minor);
+		return -ENODEV;
+	}
+
+	/*
+	 * If FBPR memory wasn't defined using the qbman compatible string,
+	 * try using the of_reserved_mem_device method.
+	 */
+	if (!fbpr_a) {
+		ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+
+	dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
+	bm_set_memory(fbpr_a, fbpr_sz);
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %pOF IRQ\n", node);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+			       dev);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+			ret, node);
+		return ret;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+	/*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+	if (IS_ERR(bm_bpalloc)) {
+		ret = PTR_ERR(bm_bpalloc);
+		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	/* seed BMan resource pool */
+	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+			0, bm_pool_cnt - 1, ret);
+		return ret;
+	}
+
+	__bman_probed = 1;
+
+	return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_bman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_bman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 0000000..2f71f7d
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,207 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+	struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+	if (!p) {
+		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+			 __func__, pcfg->cpu);
+		return NULL;
+	}
+
+	bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+	affine_bportals[pcfg->cpu] = p;
+
+	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+	return p;
+}
+
+static int bman_offline_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return 0;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return 0;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(0));
+	return 0;
+}
+
+static int bman_online_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (!p)
+		return 0;
+
+	pcfg = bman_get_bm_portal_config(p);
+	if (!pcfg)
+		return 0;
+
+	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+	return 0;
+}
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct bm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	int irq, cpu;
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+		return -ENXIO;
+	}
+
+	pcfg->cpu = -1;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %pOF IRQ'\n", node);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+					resource_size(addr_phys[0]),
+					QBMAN_MEMREMAP_ATTR);
+	if (!pcfg->addr_virt_ce) {
+		dev_err(dev, "memremap::CE failed\n");
+		goto err_ioremap1;
+	}
+
+	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+					resource_size(addr_phys[1]));
+	if (!pcfg->addr_virt_ci) {
+		dev_err(dev, "ioremap::CI failed\n");
+		goto err_ioremap2;
+	}
+
+	spin_lock(&bman_lock);
+	cpu = cpumask_next_zero(-1, &portal_cpus);
+	if (cpu >= nr_cpu_ids) {
+		/* unassigned portal, skip init */
+		spin_unlock(&bman_lock);
+		return 0;
+	}
+
+	cpumask_set_cpu(cpu, &portal_cpus);
+	spin_unlock(&bman_lock);
+	pcfg->cpu = cpu;
+
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpu_online(cpu))
+		bman_offline_cpu(cpu);
+
+	return 0;
+
+err_portal_init:
+	iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+	memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+	return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+	{
+		.compatible = "fsl,bman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = bman_portal_ids,
+	},
+	.probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"soc/qbman_portal:online",
+					bman_online_cpu, bman_offline_cpu);
+	if (ret < 0) {
+		pr_err("bman: failed to register hotplug callbacks.\n");
+		platform_driver_unregister(drv);
+		return ret;
+	}
+	return 0;
+}
+
+module_driver(bman_portal_driver,
+	      bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 0000000..751ce90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,78 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+	/* Portal addresses */
+	void  *addr_virt_ce;
+	void __iomem *addr_virt_ci;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	struct device *dev;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below might be called while the cpu to which the
+ * portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 0000000..09b1c96
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+	int loop = 1;
+
+	while (loop--)
+		bman_test_api();
+#endif
+	return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 0000000..037ed34
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 0000000..6f6bdd1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS	93
+#define LOOPS		3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_BUFS; i++)
+		bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+	bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+	if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+		/*
+		 * On SoCs with BMan revision 2.0, BMan only respects the 40
+		 * LS-bits of buffer addresses, masking off the upper 8-bits on
+		 * release commands. The API provides for 48-bit addresses
+		 * because some SoCs support all 48-bits. When generating
+		 * garbage addresses for testing, we either need to zero the
+		 * upper 8-bits when releasing to BMan (otherwise we'll be
+		 * disappointed when the buffers we acquire back from BMan
+		 * don't match), or we need to mask the upper 8-bits off when
+		 * comparing. We do the latter.
+		 */
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return -1;
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+		    (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return 1;
+	} else {
+		if (bm_buffer_get64(a) < bm_buffer_get64(b))
+			return -1;
+		if (bm_buffer_get64(a) > bm_buffer_get64(b))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void bufs_confirm(void)
+{
+	int i, j;
+
+	for (i = 0; i < NUM_BUFS; i++) {
+		int matches = 0;
+
+		for (j = 0; j < NUM_BUFS; j++)
+			if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+				matches++;
+		WARN_ON(matches != 1);
+	}
+}
+
+/* API test: release buffers to a pool, acquire them back and compare */
+void bman_test_api(void)
+{
+	int i, loops = LOOPS;
+
+	bufs_init();
+
+	pr_info("%s(): Starting\n", __func__);
+
+	pool = bman_new_pool();
+	if (!pool) {
+		pr_crit("bman_new_pool() failed\n");
+		goto failed;
+	}
+
+	/* Release buffers */
+do_loop:
+	i = 0;
+	while (i < NUM_BUFS) {
+		int num = 8;
+
+		if (i + num > NUM_BUFS)
+			num = NUM_BUFS - i;
+		if (bman_release(pool, bufs_in + i, num)) {
+			pr_crit("bman_release() failed\n");
+			goto failed;
+		}
+		i += num;
+	}
+
+	/* Acquire buffers */
+	while (i > 0) {
+		int tmp, num = 8;
+
+		if (num > i)
+			num = i;
+		tmp = bman_acquire(pool, bufs_out + i - num, num);
+		WARN_ON(tmp != num);
+		i -= num;
+	}
+	i = bman_acquire(pool, NULL, 1);
+	WARN_ON(i > 0);
+
+	bufs_confirm();
+
+	if (--loops)
+		goto do_loop;
+
+	/* Clean up */
+	bman_free_pool(pool);
+	pr_info("%s(): Finished\n", __func__);
+	return;
+
+failed:
+	WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 0000000..9436aa8
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,78 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of NXP Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+				size_t *size)
+{
+	int ret;
+	struct device_node *mem_node;
+	u64 size64;
+
+	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+	if (ret) {
+		dev_err(dev,
+			"of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+			idx, ret);
+		return -ENODEV;
+	}
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (mem_node) {
+		ret = of_property_read_u64(mem_node, "size", &size64);
+		if (ret) {
+			dev_err(dev, "of_address_to_resource fails 0x%x\n",
+			        ret);
+			return -ENODEV;
+		}
+		*size = size64;
+	} else {
+		dev_err(dev, "No memory-region found for index %d\n", idx);
+		return -ENODEV;
+	}
+
+	if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+		dev_err(dev, "DMA Alloc memory failed\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Disassociate the reserved memory area from the device
+	 * because a device can only have one DMA memory area. This
+	 * should be fine since the memory is allocated and initialized
+	 * and only ever accessed by the QBMan device from now on.
+	 */
+	of_reserved_mem_device_release(dev);
+	return 0;
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 0000000..9f37900
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,114 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+static inline void dpaa_flush(void *p)
+{
+	/*
+	 * Only PPC needs to flush the cache currently - on ARM the mapping
+	 * is non-cacheable.
+	 */
+#ifdef CONFIG_PPC
+	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+	prefetch(p+32);
+#endif
+	prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+	dpaa_invalidate(p);
+	dpaa_touch_ro(p);
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
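+
+/*
+ * Worked example for dpaa_cyc_diff() (values are illustrative): with
+ * ringsize = 8, first = 6 and last = 2, the wrap branch returns
+ * 8 + 2 - 6 = 4, i.e. entries 6, 7, 0 and 1 are outstanding.
+ */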
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF	0x80000000
+
+/* Initialize a device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+				size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WC
+#endif
+
+#endif	/* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000..8cc0151
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2888 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL	15
+#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
+#define IRQNAME		"QMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x3000
+#define QM_REG_EQCR_CI_CINH	0x3040
+#define QM_REG_EQCR_ITR		0x3080
+#define QM_REG_DQRR_PI_CINH	0x3100
+#define QM_REG_DQRR_CI_CINH	0x3140
+#define QM_REG_DQRR_ITR		0x3180
+#define QM_REG_DQRR_DCAP	0x31C0
+#define QM_REG_DQRR_SDQCR	0x3200
+#define QM_REG_DQRR_VDQCR	0x3240
+#define QM_REG_DQRR_PDQCR	0x3280
+#define QM_REG_MR_PI_CINH	0x3300
+#define QM_REG_MR_CI_CINH	0x3340
+#define QM_REG_MR_ITR		0x3380
+#define QM_REG_CFG		0x3500
+#define QM_REG_ISR		0x3600
+#define QM_REG_IER		0x3640
+#define QM_REG_ISDR		0x3680
+#define QM_REG_IIR		0x36C0
+#define QM_REG_ITPR		0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3040
+#define QM_CL_DQRR_PI_CENA	0x3100
+#define QM_CL_DQRR_CI_CENA	0x3140
+#define QM_CL_MR_PI_CENA	0x3300
+#define QM_CL_MR_CI_CENA	0x3340
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+#else
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x0000
+#define QM_REG_EQCR_CI_CINH	0x0004
+#define QM_REG_EQCR_ITR		0x0008
+#define QM_REG_DQRR_PI_CINH	0x0040
+#define QM_REG_DQRR_CI_CINH	0x0044
+#define QM_REG_DQRR_ITR		0x0048
+#define QM_REG_DQRR_DCAP	0x0050
+#define QM_REG_DQRR_SDQCR	0x0054
+#define QM_REG_DQRR_VDQCR	0x0058
+#define QM_REG_DQRR_PDQCR	0x005c
+#define QM_REG_MR_PI_CINH	0x0080
+#define QM_REG_MR_CI_CINH	0x0084
+#define QM_REG_MR_ITR		0x0088
+#define QM_REG_CFG		0x0100
+#define QM_REG_ISR		0x0e00
+#define QM_REG_IER		0x0e04
+#define QM_REG_ISDR		0x0e08
+#define QM_REG_IIR		0x0e0c
+#define QM_REG_ITPR		0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3100
+#define QM_CL_DQRR_PI_CENA	0x3200
+#define QM_CL_DQRR_CI_CENA	0x3300
+#define QM_CL_MR_PI_CENA	0x3400
+#define QM_CL_MR_CI_CENA	0x3500
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+#endif
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives would simply degrade performance. Hence
+ * the use of the __raw_*() interfaces, which only ensure that the compiler
+ * treats the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *     dmode == h/w dequeue mode
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
+	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
+	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
+	qm_eqcr_pvb = 2		/* valid-bit */
+};
+enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
+	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
+	qm_dqrr_dpull = 1	/* PDQCR */
+};
+enum qm_dqrr_pmode {		/* s/w-only */
+	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
+	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
+	qm_dqrr_pvb		/* reads valid-bit */
+};
+enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
+	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
+	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
+	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {		/* s/w-only */
+	qm_mr_pci,		/* reads MR_PI_CINH */
+	qm_mr_pce,		/* reads MR_PI_CENA */
+	qm_mr_pvb		/* reads valid-bit */
+};
+enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
+	qm_mr_cci = 0,		/* CI index, cache-inhibited */
+	qm_mr_cce = 1		/* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE		8
+#define QM_DQRR_SIZE		16
+#define QM_MR_SIZE		8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+	u8 _ncw_verb; /* writes to this are non-coherent */
+	u8 dca;
+	__be16 seqnum;
+	u8 __reserved[4];
+	__be32 fqid;	/* 24-bit */
+	__be32 tag;
+	struct qm_fd fd;
+	u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT		0x80
+#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
+
+struct qm_eqcr {
+	struct qm_eqcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	u32 busy;
+	enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+	const struct qm_dqrr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_dqrr_dmode dmode;
+	enum qm_dqrr_pmode pmode;
+	enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+	union qm_mr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum qm_mr_pmode pmode;
+	enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "FQ" command layout */
+struct qm_mcc_fq {
+	u8 _ncw_verb;
+	u8 __reserved1[3];
+	__be32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+
+/* "CGR" command layout */
+struct qm_mcc_cgr {
+	u8 _ncw_verb;
+	u8 __reserved1[30];
+	u8 cgid;
+	u8 __reserved2[32];
+};
+
+#define QM_MCC_VERB_VBIT		0x80
+#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED	0x40
+#define QM_MCC_VERB_INITFQ_SCHED	0x41
+#define QM_MCC_VERB_QUERYFQ		0x44
+#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ		0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
+#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
+#define QM_MCC_VERB_INITCGR		0x50
+#define QM_MCC_VERB_MODIFYCGR		0x51
+#define QM_MCC_VERB_CGRTESTWRITE	0x52
+#define QM_MCC_VERB_QUERYCGR		0x58
+#define QM_MCC_VERB_QUERYCONGESTION	0x59
+union qm_mc_command {
+	struct {
+		u8 _ncw_verb; /* writes to this are non-coherent */
+		u8 __reserved[63];
+	};
+	struct qm_mcc_initfq initfq;
+	struct qm_mcc_initcgr initcgr;
+	struct qm_mcc_fq fq;
+	struct qm_mcc_cgr cgr;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+	u8 verb;
+	u8 result;
+	u8 __reserved1[8];
+	struct qm_fqd fqd;	/* the FQD fields are here */
+	u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+	u8 verb;
+	u8 result;
+	u8 fqs;		/* Frame Queue Status */
+	u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID		0x80
+#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL		0x00
+#define QM_MCR_RESULT_OK		0xf0
+#define QM_MCR_RESULT_ERR_FQID		0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
+#define QM_MCR_RESULT_PENDING		0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
+#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT			10000	/* us */
+union qm_mc_result {
+	struct {
+		u8 verb;
+		u8 result;
+		u8 __reserved1[62];
+	};
+	struct qm_mcr_queryfq queryfq;
+	struct qm_mcr_alterfq alterfq;
+	struct qm_mcr_querycgr querycgr;
+	struct qm_mcr_querycongestion querycongestion;
+	struct qm_mcr_querywq querywq;
+	struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+	union qm_mc_command *cr;
+	union qm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	enum {
+		/* Can be _mc_start()ed */
+		qman_mc_idle,
+		/* Can be _mc_commit()ed or _mc_abort()ed */
+		qman_mc_user,
+		/* Can only be _mc_retry()ed */
+		qman_mc_hw
+	} state;
+#endif
+};
+
+struct qm_addr {
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* same value as above but for direct access */
+	void __iomem *ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/*
+	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ...
+	 */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+	return ioread32be(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+	iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+	dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+	dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~EQCR_CARRY;
+
+	return (struct qm_eqcr_entry *)addr;
+}
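+
+/*
+ * Worked example for eqcr_carryclear() (addresses are illustrative): with
+ * 64-byte entries, EQCR_SHIFT = 6 and EQCR_CARRY = 8 << 6 = 0x200. A cursor
+ * incremented one entry past the end of a ring based at 0x...000 lands on
+ * 0x...200; clearing the carry bit wraps it back to the ring base.
+ */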
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = eqcr_carryclear(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+				enum qm_eqcr_pmode pmode,
+				unsigned int eq_stash_thresh,
+				int eq_stash_prio)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+		     QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+	      (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+	      (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+	      ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (pi != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("losing uncommitted EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPAA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+			   (QM_EQCR_SIZE - 1);
+		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dpaa_zero(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+	DPAA_ASSERT(eqcr->busy);
+	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
+	DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	eqcr_commit_checks(eqcr);
+	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	dma_wmb();
+	eqcursor = eqcr->cursor;
+	eqcursor->_ncw_verb = myverb | eqcr->vbit;
+	dpaa_flush(eqcursor);
+	eqcr_inc(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
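+
+/*
+ * Note on qm_eqcr_pvb_commit() above: the caller fills the entry body first,
+ * then dma_wmb() orders those stores before the verb byte (carrying the
+ * valid bit) is written, so QMan can never observe a valid verb in front of
+ * a half-written enqueue command.
+ */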
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+	struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+					const struct qm_dqrr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~DQRR_CARRY;
+
+	return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+	return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+			       const struct qm_portal_config *config,
+			       enum qm_dqrr_dmode dmode,
+			       enum qm_dqrr_pmode pmode,
+			       enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	u32 cfg;
+
+	/* Make sure the DQRR will be idle when we enable */
+	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->cursor = dqrr->ring + dqrr->ci;
+	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+			QM_DQRR_VERB_VBIT : 0;
+	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	dqrr->dmode = dmode;
+	dqrr->pmode = pmode;
+	dqrr->cmode = cmode;
+#endif
+	/* Invalidate every ring entry before beginning */
+	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+		((dmode & 1) << 18) |			/* DP */
+		((cmode & 3) << 16) |			/* DCM */
+		0xa0 |					/* RE+SE */
+		(0 ? 0x40 : 0) |			/* Ignore RP */
+		(0 ? 0x10 : 0);				/* Ignore SP */
+	qm_out(portal, QM_REG_CFG, cfg);
+	qm_dqrr_set_maxfill(portal, max_fill);
+	return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (dqrr->cmode != qm_dqrr_cdc &&
+	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+		pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+						struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (!dqrr->fill)
+		return NULL;
+	return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->fill);
+	dqrr->cursor = dqrr_inc(dqrr->cursor);
+	return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_dqrr *dqrr = &portal->dqrr;
+	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+	/*
+	 * If PAMU is not available we need to invalidate the cache.
+	 * When PAMU is available the cache is updated by stash
+	 */
+	dpaa_invalidate_touch_ro(res);
+#endif
+	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+		if (!dqrr->pi)
+			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+		dqrr->fill++;
+	}
+}
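+
+/*
+ * Valid-bit example for qm_dqrr_pvb_update() above (illustrative): hardware
+ * flips the valid bit it writes on each lap of the ring, and dqrr->vbit
+ * tracks the value expected for the current lap (toggled whenever pi wraps
+ * to 0). A stale entry left over from the previous lap therefore carries the
+ * wrong valid bit, never matches, and is not counted into dqrr->fill.
+ */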
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+					const struct qm_dqrr_entry *dq,
+					int park)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+	int idx = dqrr_ptr2idx(dq);
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPAA_ASSERT((dqrr->ring + idx) == dq);
+	DPAA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+	       ((park ? 1 : 0) << 6) |		    /* DQRR_DCAP::PK */
+	       idx);				    /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+	       (bitmask << 16));		    /* DQRR_DCAP::DCAP_CI */
+}
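+
+/*
+ * Example for qm_dqrr_cdc_consume_n() above (bitmask is illustrative): with
+ * DQRR_DCAP::S set, bits 16..31 are treated as a bitmask of ring indices,
+ * so a bitmask of 0x3 acknowledges DQRR entries 0 and 1 in one write.
+ */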
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+	uintptr_t addr = (uintptr_t)p;
+
+	addr &= ~MR_CARRY;
+
+	return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+	return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+			     enum qm_mr_cmode cmode)
+{
+	struct qm_mr *mr = &portal->mr;
+	u32 cfg;
+
+	mr->ring = portal->addr.ce + QM_CL_MR;
+	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+	mr->cursor = mr->ring + mr->ci;
+	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+		? QM_MR_VERB_VBIT : 0;
+	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mr->pmode = pmode;
+	mr->cmode = cmode;
+#endif
+	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
+	qm_out(portal, QM_REG_CFG, cfg);
+	return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (mr->ci != mr_ptr2idx(mr->cursor))
+		pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	if (!mr->fill)
+		return NULL;
+	return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->fill);
+	mr->cursor = mr_inc(mr->cursor);
+	return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+
+	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
+		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+		if (!mr->pi)
+			mr->vbit ^= QM_MR_VERB_VBIT;
+		mr->fill++;
+		res = mr_inc(res);
+	}
+	dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+	struct qm_mr *mr = &portal->mr;
+
+	DPAA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = mr_ptr2idx(mr->cursor);
+	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.ce + QM_CL_CR;
+	mc->rr = portal->addr.ce + QM_CL_RR0;
+	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
+		    ? 0 : 1;
+	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+	if (mc->state != qman_mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+
+	DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_user;
+#endif
+	dpaa_zero(mc->cr);
+	return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_user);
+	dma_wmb();
+	mc->cr->_ncw_verb = myverb | mc->vbit;
+	dpaa_flush(mc->cr);
+	dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+	struct qm_mc *mc = &portal->mc;
+	union qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPAA_ASSERT(mc->state == qman_mc_hw);
+	/*
+	 * The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering...
+	 */
+	if (!rr->verb) {
+		dpaa_invalidate_touch_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+				       union qm_mc_result **mcr)
+{
+	int timeout = QM_MCR_TIMEOUT;
+
+	do {
+		*mcr = qm_mc_result(portal);
+		if (*mcr)
+			break;
+		udelay(1);
+	} while (--timeout);
+
+	return timeout;
+}
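+
+/*
+ * For illustration, the command sequence that every management-command
+ * caller below follows (a sketch, not additional driver code):
+ *
+ *	mcc = qm_mc_start(&p->p);          // claim the (zeroed) CR
+ *	qm_fqid_set(&mcc->fq, fqid);       // fill in the command fields
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ *	if (!qm_mc_result_timeout(&p->p, &mcr))
+ *		return -ETIMEDOUT;         // h/w never responded
+ *	// mcr->result and mcr->queryfq are now valid
+ */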
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+	fq->flags |= mask;
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+	fq->flags &= ~mask;
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+	return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+	return !(fq->flags & mask);
+}
+
+struct qman_portal {
+	struct qm_portal p;
+	/* PORTAL_BITS_*** - dynamic, strictly internal */
+	unsigned long bits;
+	/* interrupt sources processed by portal_isr(), configurable */
+	unsigned long irq_sources;
+	u32 use_eqcr_ci_stashing;
+	/* only 1 volatile dequeue at a time */
+	struct qman_fq *vdqcr_owned;
+	u32 sdqcr;
+	/* probing time config params for cpu-affine portals */
+	const struct qm_portal_config *config;
+	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+	struct qman_cgrs *cgrs;
+	/* linked-list of CSCN handlers. */
+	struct list_head cgr_cbs;
+	/* list lock */
+	spinlock_t cgr_lock;
+	struct work_struct congestion_work;
+	struct work_struct mr_work;
+	char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+	return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+	if (!qm_portal_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+	num_fqids = _num_fqids;
+
+	fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
+				       num_fqids, 2));
+	if (!fq_table)
+		return -ENOMEM;
+
+	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+		 fq_table, num_fqids * 2);
+	return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+	struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (WARN_ON(idx >= num_fqids * 2))
+		return NULL;
+#endif
+	fq = fq_table[idx];
+	DPAA_ASSERT(!fq || idx == fq->idx);
+
+	return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+	return idx_to_fq(fqid * 2);
+}
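+
+/*
+ * The lookup table holds two slots per FQID: the even index is the
+ * full-service object and the odd index is an enqueue-only NO_MODIFY
+ * reference - see qman_create_fq(), which sets fq->idx = fqid * 2 and
+ * increments it for NO_MODIFY. This is also why qman_alloc_fq_table()
+ * sized the table at num_fqids * 2 entries.
+ */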
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+	return idx_to_fq(tag);
+#else
+	return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+	return fq->idx;
+#else
+	return (u32)fq;
+#endif
+}
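+
+/*
+ * Tag encoding: the 32-bit context_b/tag field cannot hold a 64-bit
+ * pointer, so on 64-bit builds the table index is stored and resolved
+ * through fq_table, whereas on 32-bit builds the pointer itself fits in
+ * the tag and the table walk is skipped.
+ */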
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+	struct qman_portal *p = ptr;
+
+	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+	if (unlikely(!is))
+		return IRQ_NONE;
+
+	/* DQRR-handling if it's interrupt-driven */
+	if (is & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+	/* Handling of anything else that's interrupt-driven */
+	clear |= __poll_portal_slow(p, is);
+	qm_out(&p->p, QM_REG_ISR, clear);
+	return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+	const union qm_mr_entry *msg;
+loop:
+	msg = qm_mr_current(p);
+	if (!msg) {
+		/*
+		 * if MR was full and h/w had other FQRNI entries to produce, we
+		 * need to allow it time to produce those entries once the
+		 * existing entries are consumed. A worst-case situation
+		 * (fully-loaded system) means h/w sequencers may have to do 3-4
+		 * other things before servicing the portal's MR pump, each of
+		 * which (if slow) may take ~50 qman cycles (which is ~200
+		 * processor cycles). So rounding up and then multiplying this
+		 * worst-case estimate by a factor of 10, just to be
+		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here.
+		 */
+		msleep(1);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+			      const struct qm_portal_config *c,
+			      const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *p;
+	int ret;
+	u32 isdr;
+
+	p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+	/* PAMU is required for stashing */
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+	portal->use_eqcr_ci_stashing = 0;
+#endif
+	/*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference
+	 */
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(p, qm_eqcr_pvb,
+			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		dev_err(c->dev, "EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+			qm_dqrr_cdc, DQRR_MAXFILL)) {
+		dev_err(c->dev, "DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+		dev_err(c->dev, "MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(p)) {
+		dev_err(c->dev, "MC initialisation failed\n");
+		goto fail_mc;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	INIT_WORK(&portal->congestion_work, qm_congestion_task);
+	INIT_WORK(&portal->mr_work, qm_mr_process_task);
+	portal->bits = 0;
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	isdr = 0xffffffff;
+	qm_out(p, QM_REG_ISDR, isdr);
+	portal->irq_sources = 0;
+	qm_out(p, QM_REG_IER, 0);
+	qm_out(p, QM_REG_ISR, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	if (request_irq(c->irq, portal_isr, 0, portal->irqname,	portal)) {
+		dev_err(c->dev, "request_irq() failed\n");
+		goto fail_irq;
+	}
+	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+		dev_err(c->dev, "irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need EQCR to be empty before continuing */
+	isdr &= ~QM_PIRQ_EQCI;
+	qm_out(p, QM_REG_ISDR, isdr);
+	ret = qm_eqcr_get_fill(p);
+	if (ret) {
+		dev_err(c->dev, "EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_out(p, QM_REG_ISDR, isdr);
+	if (qm_dqrr_current(p)) {
+		dev_err(c->dev, "DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(p, 0xffff);
+	}
+	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		const union qm_mr_entry *e = qm_mr_current(p);
+
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
+		goto fail_dqrr_mr_empty;
+	}
+	/* Success */
+	portal->config = c;
+	qm_out(p, QM_REG_ISDR, 0);
+	qm_out(p, QM_REG_IIR, 0);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(p, portal->sdqcr);
+	return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+	free_irq(c->irq, portal);
+fail_irq:
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_mc_finish(p);
+fail_mc:
+	qm_mr_finish(p);
+fail_mr:
+	qm_dqrr_finish(p);
+fail_dqrr:
+	qm_eqcr_finish(p);
+fail_eqcr:
+	return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+					      const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *portal;
+	int err;
+
+	portal = &per_cpu(qman_affine_portal, c->cpu);
+	err = qman_create_portal(portal, c, cgrs);
+	if (err)
+		return NULL;
+
+	spin_lock(&affine_mask_lock);
+	cpumask_set_cpu(c->cpu, &affine_mask);
+	affine_channels[c->cpu] = c->channel;
+	affine_portals[c->cpu] = portal;
+	spin_unlock(&affine_mask_lock);
+
+	return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/*
+	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began.
+	 */
+	qm_eqcr_cce_update(&qm->p);
+	qm_eqcr_cce_update(&qm->p);
+	pcfg = qm->config;
+
+	free_irq(pcfg->irq, qm);
+
+	kfree(qm->cgrs);
+	qm_mc_finish(&qm->p);
+	qm_mr_finish(&qm->p);
+	qm_dqrr_finish(&qm->p);
+	qm_eqcr_finish(&qm->p);
+
+	qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+	struct qman_portal *qm = get_affine_portal();
+	const struct qm_portal_config *pcfg;
+	int cpu;
+
+	pcfg = qm->config;
+	cpu = pcfg->cpu;
+
+	qman_destroy_portal(qm);
+
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+	put_affine_portal();
+	return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+				   const union qm_mr_entry *msg, u8 verb)
+{
+	switch (verb) {
+	case QM_MR_VERB_FQRL:
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+		fq_clear(fq, QMAN_FQ_STATE_ORL);
+		break;
+	case QM_MR_VERB_FQRN:
+		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+			    fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		fq->state = qman_fq_state_retired;
+		break;
+	case QM_MR_VERB_FQPN:
+		DPAA_ASSERT(fq->state == qman_fq_state_sched);
+		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+		fq->state = qman_fq_state_parked;
+	}
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     congestion_work);
+	struct qman_cgrs rr, c;
+	union qm_mc_result *mcr;
+	struct qman_cgr *cgr;
+
+	spin_lock(&p->cgr_lock);
+	qm_mc_start(&p->p);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		spin_unlock(&p->cgr_lock);
+		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+		return;
+	}
+	/* mask out the ones I'm not interested in */
+	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+		      &p->cgrs[0]);
+	/* check previous snapshot for delta, enter/exit congestion */
+	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+	/* update snapshot */
+	qman_cgrs_cp(&p->cgrs[1], &rr);
+	/* Invoke callback */
+	list_for_each_entry(cgr, &p->cgr_cbs, node)
+		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+	spin_unlock(&p->cgr_lock);
+	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+	struct qman_portal *p = container_of(work, struct qman_portal,
+					     mr_work);
+	const union qm_mr_entry *msg;
+	struct qman_fq *fq;
+	u8 verb, num = 0;
+
+	preempt_disable();
+
+	while (1) {
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			break;
+
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
+				if (WARN_ON(!fq))
+					break;
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+	}
+
+	qm_mr_cci_consume(&p->p, num);
+	qman_p_irqsource_add(p, QM_PIRQ_MRI);
+	preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	if (is & QM_PIRQ_CSCI) {
+		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->congestion_work);
+	}
+
+	if (is & QM_PIRQ_EQRI) {
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
+		queue_work_on(smp_processor_id(), qm_portal_wq,
+			      &p->mr_work);
+	}
+
+	return is;
+}
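+
+/*
+ * Note the deferral pattern above: for CSCI and MRI the interrupt source
+ * is masked (qman_p_irqsource_remove()) before the work item is queued
+ * on this cpu, and the work item re-enables it (qman_p_irqsource_add())
+ * once the condition has been drained - see qm_congestion_task() and
+ * qm_mr_process_task(). This moves the slow processing out of hard-irq
+ * context without losing events.
+ */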
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *	 vdqcr_owned field (which it does before setting VDQCR), and
+ *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *	 done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *	 with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+	do {
+		qm_dqrr_pvb_update(&p->p);
+		dq = qm_dqrr_current(&p->p);
+		if (!dq)
+			break;
+
+		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+			/*
+			 * VDQCR: don't trust context_b as the FQ may have
+			 * been configured for h/w consumption and we're
+			 * draining it post-retirement.
+			 */
+			fq = p->vdqcr_owned;
+			/*
+			 * We only set QMAN_FQ_STATE_NE when retiring, so we
+			 * only need to check for clearing it when doing
+			 * volatile dequeues.  It's one less thing to check
+			 * in the critical path (SDQCR).
+			 */
+			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+				fq_clear(fq, QMAN_FQ_STATE_NE);
+			/*
+			 * This is duplicated from the SDQCR code, but we
+			 * have stuff to do before *and* after this callback,
+			 * and we don't want multiple if()s in the critical
+			 * path (SDQCR).
+			 */
+			res = fq->cb.dqrr(p, fq, dq);
+			if (res == qman_cb_dqrr_stop)
+				break;
+			/* Check for VDQCR completion */
+			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+				clear_vdqcr(p, fq);
+		} else {
+			/* SDQCR: context_b points to the FQ */
+			fq = tag_to_fq(be32_to_cpu(dq->context_b));
+			/* Now let the callback do its stuff */
+			res = fq->cb.dqrr(p, fq, dq);
+			/*
+			 * The callback can request that we exit without
+			 * consuming this entry or advancing.
+			 */
+			if (res == qman_cb_dqrr_stop)
+				break;
+		}
+		/* Interpret 'dq' from a driver perspective. */
+		/*
+		 * Parking isn't possible unless HELDACTIVE was set. NB,
+		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+		 * check for HELDACTIVE to cover both.
+		 */
+		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+			    (res != qman_cb_dqrr_park));
+		/* just means "skip it, I'll consume it myself later on" */
+		if (res != qman_cb_dqrr_defer)
+			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+						 res == qman_cb_dqrr_park);
+		/* Move forward */
+		qm_dqrr_next(&p->p);
+		/*
+		 * Entry processed and consumed, increment our counter.  The
+		 * callback can request that we exit after consuming the
+		 * entry, and we also exit if we reach our processing limit,
+		 * so loop back only if neither of these conditions is met.
+		 */
+	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+	return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+	unsigned long irqflags;
+	u32 ier;
+
+	/*
+	 * Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert.
+	 */
+	local_irq_save(irqflags);
+	bits &= QM_PIRQ_VISIBLE;
+	p->irq_sources &= ~bits;
+	qm_out(&p->p, QM_REG_IER, p->irq_sources);
+	ier = qm_in(&p->p, QM_REG_IER);
+	/*
+	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering.
+	 */
+	qm_out(&p->p, QM_REG_ISR, ~ier);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+	if (cpu < 0) {
+		struct qman_portal *portal = get_affine_portal();
+
+		cpu = portal->config->cpu;
+		put_affine_portal();
+	}
+	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+	return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+	return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+	return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
+	pools &= p->config->pools;
+	p->sdqcr |= pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+	switch (result) {
+	case QM_MCR_RESULT_NULL:
+		return "QM_MCR_RESULT_NULL";
+	case QM_MCR_RESULT_OK:
+		return "QM_MCR_RESULT_OK";
+	case QM_MCR_RESULT_ERR_FQID:
+		return "QM_MCR_RESULT_ERR_FQID";
+	case QM_MCR_RESULT_ERR_FQSTATE:
+		return "QM_MCR_RESULT_ERR_FQSTATE";
+	case QM_MCR_RESULT_ERR_NOTEMPTY:
+		return "QM_MCR_RESULT_ERR_NOTEMPTY";
+	case QM_MCR_RESULT_PENDING:
+		return "QM_MCR_RESULT_PENDING";
+	case QM_MCR_RESULT_ERR_BADCOMMAND:
+		return "QM_MCR_RESULT_ERR_BADCOMMAND";
+	}
+	return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+		int ret = qman_alloc_fqid(&fqid);
+
+		if (ret)
+			return ret;
+	}
+	fq->fqid = fqid;
+	fq->flags = flags;
+	fq->state = qman_fq_state_oos;
+	fq->cgr_groupid = 0;
+
+	/* A context_b of 0 is allegedly special, so don't use that fqid */
+	if (fqid == 0 || fqid >= num_fqids) {
+		WARN(1, "bad fqid %d\n", fqid);
+		return -EINVAL;
+	}
+
+	fq->idx = fqid * 2;
+	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+		fq->idx++;
+
+	WARN_ON(fq_table[fq->idx]);
+	fq_table[fq->idx] = fq;
+
+	return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+	/*
+	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
+	 * quiesced. Instead, run some checks.
+	 */
+	switch (fq->state) {
+	case qman_fq_state_parked:
+	case qman_fq_state_oos:
+		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+			qman_release_fqid(fq->fqid);
+
+		DPAA_ASSERT(fq_table[fq->idx]);
+		fq_table[fq->idx] = NULL;
+		return;
+	default:
+		break;
+	}
+	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+	return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	u8 res, myverb;
+	int ret = 0;
+
+	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+	if (fq->state != qman_fq_state_oos &&
+	    fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
+		/* And can't be set at the same time as TDTHRESH */
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
+			return -EINVAL;
+	}
+	/* Issue an INITFQ_[PARKED|SCHED] management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    (fq->state != qman_fq_state_oos &&
+	     fq->state != qman_fq_state_parked)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initfq = *opts;
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	mcc->initfq.count = 0;
+	/*
+	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+	 * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand, don't overwrite it.
+	 */
+	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+		dma_addr_t phys_fq;
+
+		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
+		/*
+		 * and the physical address - NB, if the user wasn't trying to
+		 * set CONTEXTA, clear the stashing settings.
+		 */
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+				  QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+			memset(&mcc->initfq.fqd.context_a, 0,
+				sizeof(mcc->initfq.fqd.context_a));
+		} else {
+			struct qman_portal *p = qman_dma_portal;
+
+			phys_fq = dma_map_single(p->config->dev, fq,
+						 sizeof(*fq), DMA_TO_DEVICE);
+			if (dma_mapping_error(p->config->dev, phys_fq)) {
+				dev_err(p->config->dev, "dma_mapping failed\n");
+				ret = -EIO;
+				goto out;
+			}
+
+			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+		}
+	}
+	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+		int wq = 0;
+
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+				  QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+			wq = 4;
+		}
+		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+	}
+	qm_mc_commit(&p->p, myverb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "MCR timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	if (opts) {
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
+				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+			else
+				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+		}
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
+			fq->cgr_groupid = opts->fqd.cgid;
+	}
+	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+		qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	/* Issue an ALTERFQ_SCHED management command */
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state != qman_fq_state_parked) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_sched;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret;
+	u8 res;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_sched)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+	    fq->state == qman_fq_state_retired ||
+	    fq->state == qman_fq_state_oos) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+	res = mcr->result;
+	/*
+	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
+	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
+	 * friendly, otherwise the caller doesn't necessarily have a fully
+	 * "retired" FQ on return even if the retirement was immediate. However
+	 * this does mean some code duplication between here and
+	 * fq_state_change().
+	 */
+	if (res == QM_MCR_RESULT_OK) {
+		ret = 0;
+		/* Process 'fq' right away, we'll ignore FQRNI */
+		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		if (flags)
+			*flags = fq->flags;
+		fq->state = qman_fq_state_retired;
+		if (fq->cb.fqs) {
+			/*
+			 * Another issue with supporting "immediate" retirement
+			 * is that we're forced to drop FQRNIs, because by the
+			 * time they're seen it may already be "too late" (the
+			 * fq may have been OOS'd and free()'d already). But if
+			 * the upper layer wants a callback whether it's
+			 * immediate or not, we have to fake a "MR" entry to
+			 * look like an FQRNI...
+			 */
+			union qm_mr_entry msg;
+
+			msg.verb = QM_MR_VERB_FQRNI;
+			msg.fq.fqs = mcr->alterfq.fqs;
+			qm_fqid_set(&msg.fq, fq->fqid);
+			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
+			fq->cb.fqs(p, fq, &msg);
+		}
+	} else if (res == QM_MCR_RESULT_PENDING) {
+		ret = 1;
+		fq_set(fq, QMAN_FQ_STATE_CHANGING);
+	} else {
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p;
+	int ret = 0;
+
+	if (fq->state != qman_fq_state_retired)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+	    fq->state != qman_fq_state_retired) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_oos;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*fqd = mcr->queryfq.fqd;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fq->fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*np = mcr->queryfq_np;
+	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+		ret = -ERANGE;
+	else
+		ret = -EIO;
+out:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+			  struct qm_mcr_querycgr *cgrd)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	mcc->cgr.cgid = cgr->cgrid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+	if (mcr->result == QM_MCR_RESULT_OK)
+		*cgrd = mcr->querycgr;
+	else {
+		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+			mcr_result_str(mcr->result));
+		ret = -EIO;
+	}
+out:
+	put_affine_portal();
+	return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+	struct qm_mcr_querycgr query_cgr;
+	int err;
+
+	err = qman_query_cgr(cgr, &query_cgr);
+	if (err)
+		return err;
+
+	*result = !!query_cgr.cgr.cs;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+	unsigned long irqflags;
+	int ret = -EBUSY;
+
+	local_irq_save(irqflags);
+	if (p->vdqcr_owned)
+		goto out;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		goto out;
+
+	fq_set(fq, QMAN_FQ_STATE_VDQCR);
+	p->vdqcr_owned = fq;
+	qm_dqrr_vdqcr_set(&p->p, vdqcr);
+	ret = 0;
+out:
+	local_irq_restore(irqflags);
+	return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+	int ret;
+
+	*p = get_affine_portal();
+	ret = set_p_vdqcr(*p, fq, vdqcr);
+	put_affine_portal();
+	return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!set_vdqcr(p, fq, vdqcr));
+	else
+		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+	return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+	struct qman_portal *p;
+	int ret;
+
+	if (fq->state != qman_fq_state_parked &&
+	    fq->state != qman_fq_state_retired)
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+	else
+		ret = set_vdqcr(&p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/*
+			 * NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue.
+			 */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
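+
+/*
+ * Usage sketch (illustrative only): drain up to three frames from a
+ * retired FQ and wait for the command to complete, mirroring what
+ * qman_shutdown_fq() does further below:
+ *
+ *	u32 vdqcr = QM_VDQCR_NUMFRAMES_SET(3);
+ *
+ *	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *				    QMAN_VOLATILE_FLAG_FINISH, vdqcr);
+ *
+ * The FQID bits must be left clear in the caller's vdqcr - the function
+ * rejects them with -EINVAL and fills in fq->fqid itself.
+ */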
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+	if (avail)
+		qm_eqcr_cce_prefetch(&p->p);
+	else
+		qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags;
+	u8 avail;
+
+	p = get_affine_portal();
+	local_irq_save(irqflags);
+
+	if (p->use_eqcr_ci_stashing) {
+		/*
+		 * The stashing case is easy, only update if we need to in
+		 * order to try and liberate ring entries.
+		 */
+		eq = qm_eqcr_start_stash(&p->p);
+	} else {
+		/*
+		 * The non-stashing case is harder, need to prefetch ahead of
+		 * time.
+		 */
+		avail = qm_eqcr_get_avail(&p->p);
+		if (avail < 2)
+			update_eqcr_ci(p, avail);
+		eq = qm_eqcr_start_no_stash(&p->p);
+	}
+
+	if (unlikely(!eq))
+		goto out;
+
+	qm_fqid_set(eq, fq->fqid);
+	eq->tag = cpu_to_be32(fq_to_tag(fq));
+	eq->fd = *fd;
+
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+	local_irq_restore(irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			 struct qm_mcc_initcgr *opts)
+{
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	u8 verb = QM_MCC_VERB_MODIFYCGR;
+	int ret = 0;
+
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initcgr = *opts;
+	mcc->initcgr.cgid = cgr->cgrid;
+	if (flags & QMAN_CGR_FLAG_USE_INIT)
+		verb = QM_MCC_VERB_INITCGR;
+	qm_mc_commit(&p->p, verb);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+	if (mcr->result != QM_MCR_RESULT_OK)
+		ret = -EIO;
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+	else
+		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+	else
+		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
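+
+/*
+ * Two target encodings are in play above: QMan rev3.0 and later take a
+ * per-write update-control word (portal index plus a write/clear bit),
+ * while earlier revisions expose a single bitmask with one bit per s/w
+ * portal - hence the pre-rev30 paths read-modify-write the current
+ * cscn_targ value that callers pass in via 'val'.
+ */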
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+	struct qman_cgr cgr;
+	int err_cnt = 0;
+
+	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+			err_cnt++;
+	}
+
+	if (err_cnt)
+		pr_err("Warning: %d error%s while initialising CGR h/w\n",
+		       err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+		    struct qm_mcc_initcgr *opts)
+{
+	struct qm_mcr_querycgr cgr_state;
+	int ret;
+	struct qman_portal *p;
+
+	/*
+	 * We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC.
+	 */
+	if (cgr->cgrid >= CGR_NUM)
+		return -EINVAL;
+
+	preempt_disable();
+	p = get_affine_portal();
+	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+	preempt_enable();
+
+	cgr->chan = p->config->channel;
+	spin_lock(&p->cgr_lock);
+
+	if (opts) {
+		struct qm_mcc_initcgr local_opts = *opts;
+
+		ret = qman_query_cgr(cgr, &cgr_state);
+		if (ret)
+			goto out;
+
+		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+				     be32_to_cpu(cgr_state.cgr.cscn_targ));
+		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+
+		/* send init if flags indicate so */
+		if (flags & QMAN_CGR_FLAG_USE_INIT)
+			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+					    &local_opts);
+		else
+			ret = qm_modify_cgr(cgr, 0, &local_opts);
+		if (ret)
+			goto out;
+	}
+
+	list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if newly added object requires its callback to be called */
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* we can't go back, so proceed and return success */
+		dev_err(p->config->dev, "CGR HW state partially modified\n");
+		ret = 0;
+		goto out;
+	}
+	if (cgr->cb && cgr_state.cgr.cscn_en &&
+	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+		cgr->cb(p, cgr, 1);
+out:
+	spin_unlock(&p->cgr_lock);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_cgr *i;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->channel) {
+		/* attempt to delete from other portal than creator */
+		dev_err(p->config->dev, "CGR not owned by current portal");
+		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->channel);
+
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/*
+	 * If there are no other CGR objects for this CGRID in the list,
+	 * update CSCN_TARG accordingly
+	 */
+	list_for_each_entry(i, &p->cgr_cbs, node)
+		if (i->cgrid == cgr->cgrid && i->cb)
+			goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)  {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+		goto release_lock;
+	}
+
+	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+			       be32_to_cpu(cgr_state.cgr.cscn_targ));
+
+	ret = qm_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+	struct qman_cgr *cgr;
+	struct completion completion;
+};
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+	qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+	preempt_disable();
+	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+					 qman_delete_cgr_smp_call, cgr, true);
+		preempt_enable();
+		return;
+	}
+
+	qman_delete_cgr(cgr);
+	preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+	const union qm_mr_entry *msg;
+	int found = 0;
+
+	qm_mr_pvb_update(p);
+	msg = qm_mr_current(p);
+	while (msg) {
+		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+			found = 1;
+		qm_mr_next(p);
+		qm_mr_cci_consume_to_current(p);
+		qm_mr_pvb_update(p);
+		msg = qm_mr_current(p);
+	}
+	return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+				      bool wait)
+{
+	const struct qm_dqrr_entry *dqrr;
+	int found = 0;
+
+	do {
+		qm_dqrr_pvb_update(p);
+		dqrr = qm_dqrr_current(p);
+		if (!dqrr)
+			cpu_relax();
+	} while (wait && !dqrr);
+
+	while (dqrr) {
+		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
+			found = 1;
+		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+		qm_dqrr_pvb_update(p);
+		qm_dqrr_next(p);
+		dqrr = qm_dqrr_current(p);
+	}
+	return found;
+}
+
+#define qm_mr_drain(p, V) \
+	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+	_qm_dqrr_consume_and_match(p, 0, 0, false)
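+
+/*
+ * For example, qm_mr_drain(p, FQRN) expands to
+ * _qm_mr_consume_and_match_verb(p, QM_MR_VERB_FQRN): the entire MR is
+ * consumed and the return value only reports whether an FQRN was seen
+ * along the way. The DQRR variants behave likewise for stat bits, with
+ * the _wait form spinning until at least one entry appears.
+ */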
+
+static int qman_shutdown_fq(u32 fqid)
+{
+	struct qman_portal *p;
+	struct device *dev;
+	union qm_mc_command *mcc;
+	union qm_mc_result *mcr;
+	int orl_empty, drain = 0, ret = 0;
+	u32 channel, wq, res;
+	u8 state;
+
+	p = get_affine_portal();
+	dev = p->config->dev;
+	/* Determine the state of the FQID */
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ_NP timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+	if (state == QM_MCR_NP_STATE_OOS)
+		goto out; /* Already OOS, no need to do any more checks */
+
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(&p->p);
+	qm_fqid_set(&mcc->fq, fqid);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+		dev_err(dev, "QUERYFQ timeout\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	/* Need to store these since the MCR gets reused */
+	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(&p->p);
+		qm_fqid_set(&mcc->fq, fqid);
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			dev_err(dev, "QUERYFQ_NP timeout\n");
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_RETIRE);
+		res = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (res == QM_MCR_RESULT_PENDING) {
+			/*
+			 * Need to wait for the FQRN in the message ring, which
+			 * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain, the portal needs to be
+			 * set to dequeue from the channel the FQ is scheduled
+			 * on.
+			 */
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < qm_channel_pool1 + 15) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1) << 4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+					fqid, channel);
+				ret = -EBUSY;
+				goto out;
+			}
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				qm_dqrr_sdqcr_set(&p->p,
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_POOL_CONV
+						  (channel));
+			do {
+				/* Keep draining DQRR while checking the MR */
+				qm_dqrr_drain_nomatch(&p->p);
+				/* Process message ring too */
+				found_fqrn = qm_mr_drain(&p->p, FQRN);
+				cpu_relax();
+			} while (!found_fqrn);
+
+		}
+		if (res != QM_MCR_RESULT_OK &&
+		    res != QM_MCR_RESULT_PENDING) {
+			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+				fqid, res);
+			ret = -EIO;
+			goto out;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/*
+			 * ORL had no entries, no need to wait until the
+			 * ERNs come in
+			 */
+			orl_empty = 1;
+		}
+		/*
+		 * Retirement succeeded, check to see if FQ needs
+		 * to be drained
+		 */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			do {
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+				qm_dqrr_vdqcr_set(&p->p, vdqcr);
+				/*
+				 * Wait for a dequeue and process the dequeues,
+				 * making sure to empty the ring completely
+				 */
+			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+		}
+		qm_dqrr_sdqcr_set(&p->p, 0);
+
+		while (!orl_empty) {
+			/* Wait for the ORL to have been completely drained */
+			orl_empty = qm_mr_drain(&p->p, FQRL);
+			cpu_relax();
+		}
+		mcc = qm_mc_start(&p->p);
+		qm_fqid_set(&mcc->fq, fqid);
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_RETIRED:
+		/* Send OOS Command */
+		mcc = qm_mc_start(&p->p);
+		qm_fqid_set(&mcc->fq, fqid);
+		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+		if (!qm_mc_result_timeout(&p->p, &mcr)) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			    QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result) {
+			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+				fqid, mcr->result);
+			ret = -EIO;
+			goto out;
+		}
+		break;
+
+	case QM_MCR_NP_STATE_OOS:
+		/*  Done */
+		break;
+
+	default:
+		ret = -EIO;
+	}
+
+out:
+	put_affine_portal();
+	return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal)
+{
+	return portal->config;
+}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+	unsigned long addr;
+
+	if (!p)
+		return -ENODEV;
+
+	addr = gen_pool_alloc(p, cnt);
+	if (!addr)
+		return -ENOMEM;
+
+	*result = addr & ~DPAA_GENALLOC_OFF;
+
+	return 0;
+}
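+
+/*
+ * NB: gen_pool_alloc() returns 0 on failure, so valid IDs (which may
+ * legitimately be 0) are stored in the pools offset by
+ * DPAA_GENALLOC_OFF. Allocation masks the offset back off here, and the
+ * release paths below OR it back in before calling gen_pool_free().
+ */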
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+	return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+	int ret = qman_shutdown_fq(fqid);
+
+	if (ret) {
+		pr_debug("FQID %d leaked\n", fqid);
+		return ret;
+	}
+
+	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+	/*
+	 * We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
+	 * channel is the pool-channel being released. When a non-OOS FQD is
+	 * found we attempt to clean it up.
+	 */
+	struct qman_fq fq = {
+		.fqid = QM_FQID_RANGE_START
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err == -ERANGE)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		else if (WARN_ON(err))
+			return err;
+
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return err;
+			if (qm_fqd_get_chan(&fqd) == qp) {
+				/* The channel is the FQ's target, clean it */
+				err = qman_shutdown_fq(fq.fqid);
+				if (err)
+					/*
+					 * Couldn't shut down the FQ
+					 * so the pool must be leaked
+					 */
+					return err;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+	int ret;
+
+	ret = qpool_cleanup(qp);
+	if (ret) {
+		pr_debug("CHID %d leaked\n", qp);
+		return ret;
+	}
+
+	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+	/*
+	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
+	 */
+	struct qman_fq fq = {
+		.fqid = QM_FQID_RANGE_START
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err == -ERANGE)
+			/* FQID range exceeded, found no problems */
+			return 0;
+		else if (WARN_ON(err))
+			return err;
+
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			if (WARN_ON(err))
+				return err;
+			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+			    fqd.cgid == cgrid) {
+				pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return -EIO;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+	int ret;
+
+	ret = cgr_cleanup(cgrid);
+	if (ret) {
+		pr_debug("CGRID %d leaked\n", cgrid);
+		return ret;
+	}
+
+	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 0000000..6fd5fef
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,861 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
+#define REG_DD_CFG		0x0200
+#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC		0x0400
+#define REG_PFDR_FP_HEAD	0x0404
+#define REG_PFDR_FP_TAIL	0x0408
+#define REG_PFDR_FP_LWIT	0x0410
+#define REG_PFDR_CFG		0x0414
+#define REG_SFDR_CFG		0x0500
+#define REG_SFDR_IN_USE		0x0504
+#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID	0x0630
+#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_ECIR2		0x0a0c
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_MCR			0x0b00
+#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG		0x0be0
+#define REG_HID_CFG		0x0bf0
+#define REG_IDLE_STAT		0x0bf4
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FQD_BARE		0x0c00
+#define REG_PFDR_BARE		0x0c20
+#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE		0x0c80
+#define REG_QCSP_BAR		0x0c84
+#define REG_CI_SCHED_CFG	0x0d00
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_CI_RLM_AVG		0x0d14
+#define REG_ERR_ISR		0x0e00
+#define REG_ERR_IER		0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR		0x01000000
+#define MCR_get_rslt(v)		(u8)((v) >> 24)
+#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)		((r) == 0xf0)
+#define MCR_rslt_eaccess(r)	((r) == 0xf8)
+#define MCR_rslt_inval(r)	((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV		4
+#define QM_CI_SCHED_CFG_SRQ_W		3
+#define QM_CI_SCHED_CFG_RW_W		2
+#define QM_CI_SCHED_CFG_BMAN_W		2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+	qm_wq_portal = 0,
+	qm_wq_pool = 1,
+	qm_wq_fman0 = 2,
+	qm_wq_fman1 = 3,
+	qm_wq_caam = 4,
+	qm_wq_pme = 5,
+	qm_wq_first = qm_wq_portal,
+	qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+	qm_memory_fqd,
+	qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
+#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
+#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
+#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
+#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
+#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+			 QM_EIRQ_IFSI)
+
+struct qm_ecir {
+	u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+	return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+	return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+	u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+	return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+	return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+	u32 info; /* memid[24-27], eadr[0-11] */
+		  /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+	return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+	return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
+	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
+	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
+	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
+	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
+	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
+	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+	{ 0x01FF, 24, "FQD cache tag memory 0" },
+	{ 0x01FF, 24, "FQD cache tag memory 1" },
+	{ 0x01FF, 24, "FQD cache tag memory 2" },
+	{ 0x01FF, 24, "FQD cache tag memory 3" },
+	{ 0x0FFF, 512, "FQD cache memory" },
+	{ 0x07FF, 128, "SFDR memory" },
+	{ 0x01FF, 72, "WQ context memory" },
+	{ 0x00FF, 240, "CGR memory" },
+	{ 0x00FF, 302, "Internal Order Restoration List memory" },
+	{ 0x01FF, 256, "SW portal ring memory" },
+};
+
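+/*
+ * PLWI and PEBI are watermark/level conditions that stay asserted while the
+ * underlying resource state persists, so the error ISR masks them after the
+ * first report instead of letting them re-raise continuously.
+ */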
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+static int __qman_probed;
+
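+/*
+ * CCSR accessors: 'offset' is a byte offset from the register list above,
+ * while qm_ccsr_start is a u32 pointer, hence the offset/4 index arithmetic.
+ * All CCSR registers are big-endian.
+ */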
+static inline u32 qm_ccsr_in(u32 offset)
+{
+	return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+	iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+	return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+	qm_dc_portal_fman0 = 0,
+	qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+		    portal == qm_dc_portal_fman1);
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+	else
+		qm_ccsr_out(REG_DCP_CFG(portal),
+			    (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+				 u8 csw5, u8 csw6, u8 csw7)
+{
+	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+	qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
+		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
+		    (QM_CI_SCHED_CFG_RW_W << 4) |
+		    QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = qm_ccsr_in(REG_IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+#define PFDR_AR_EN		BIT(31)
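+/*
+ * Program the base address and size of an FQD/PFDR memory area. The AR
+ * register encodes the size as ilog2(size) - 1; e.g. a 16 MiB region gives
+ * exp = 24 and an AR value of PFDR_AR_EN | 23.
+ */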
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+	u32 exp = ilog2(size);
+
+	/* choke if size isn't within range */
+	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+		    is_power_of_2(size));
+	/* choke if 'ba' has lower-alignment than 'size' */
+	DPAA_ASSERT(!(ba & (size - 1)));
+	qm_ccsr_out(offset, upper_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+	qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+	/* Make sure the command interface is 'idle' */
+	if (!MCR_rslt_idle(rslt)) {
+		dev_crit(dev, "QMAN_MCR isn't idle");
+		WARN_ON(1);
+	}
+
+	/* Write the MCR command params then the verb */
+	qm_ccsr_out(REG_MCP(0), pfdr_start);
+	/*
+	 * TODO: remove this - it's a workaround for a model bug that is
+	 * corrected in more recent versions. We use the workaround until
+	 * everyone has upgraded.
+	 */
+	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+	dma_wmb();
+	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+	/* Poll for the result */
+	do {
+		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+	} while (!MCR_rslt_idle(rslt));
+	if (MCR_rslt_ok(rslt))
+		return 0;
+	if (MCR_rslt_eaccess(rslt))
+		return -EACCES;
+	if (MCR_rslt_inval(rslt))
+		return -EINVAL;
+	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+	return -ENODEV;
+}
+
+/*
+ * QMan needs two global memory areas initialized at boot time:
+ *  1) FQD: Frame Queue Descriptors used to manage frame queues
+ *  2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed.
+ */
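+/*
+ * On PPC these areas can come from legacy reserved-memory nodes; a node of
+ * roughly this shape (name and sizes illustrative only) is matched by the
+ * RESERVEDMEM_OF_DECLARE() hooks below:
+ *
+ *	qman-fqd {
+ *		compatible = "fsl,qman-fqd";
+ *		size = <0 0x400000>;
+ *		alignment = <0 0x400000>;
+ *	};
+ */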
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when the compatible
+ * string is set to "fsl,qman-fqd" or "fsl,qman-pfdr"
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+	/* map as cacheable, non-guarded */
+	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+	if (!tmpp)
+		return -ENOMEM;
+
+	memset_io(tmpp, 0, sz);
+	flush_dcache_range((unsigned long)tmpp,
+			   (unsigned long)tmpp + sz);
+	iounmap(tmpp);
+
+	return 0;
+}
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+	fqd_a = rmem->base;
+	fqd_sz = rmem->size;
+
+	WARN_ON(!(fqd_a && fqd_sz));
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+	pfdr_a = rmem->base;
+	pfdr_sz = rmem->size;
+
+	WARN_ON(!(pfdr_a && pfdr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+#endif
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+	return fqd_sz / 64;
+}
+
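+/*
+ * Dump the contents of the 16-word EDATA capture register file. Only
+ * bit_count bits of captured data are valid and they occupy the tail of the
+ * register file, so printing starts at EDATA(16 - i) with the leading
+ * (partial) word masked down to its valid bits.
+ */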
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	dev_warn(dev, "ErrInt, EDATA:\n");
+	i = bit_count / 32;
+	if (bit_count % 32) {
+		i++;
+		mask = ~(mask << bit_count % 32);
+	}
+	j = 16 - i;
+	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+				      u32 ecsr_val)
+{
+	struct qm_ecir ecir_val;
+	struct qm_eadr eadr_val;
+	int memid;
+
+	ecir_val.info = qm_ccsr_in(REG_ECIR);
+	/* Is portal info valid */
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		struct qm_ecir2 ecir2_val;
+
+		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+				 qm_ecir2_get_pnum(&ecir2_val));
+		}
+		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_v3_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_v3_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	} else {
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			dev_warn(dev, "ErrInt: %s id %d\n",
+				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+				 qm_ecir_get_pnum(&ecir_val));
+		}
+		if (ecsr_val & FQID_ECSR_ERR)
+			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+				 qm_ecir_get_fqid(&ecir_val));
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.info = qm_ccsr_in(REG_EADR);
+			memid = qm_eadr_get_memid(&eadr_val);
+			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+				 error_mdata[memid].txt,
+				 error_mdata[memid].addr_mask
+					& qm_eadr_get_eadr(&eadr_val));
+			log_edata_bits(dev, error_mdata[memid].bits);
+		}
+	}
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+	struct device *dev = ptr;
+
+	ier_val = qm_ccsr_in(REG_ERR_IER);
+	isr_val = qm_ccsr_in(REG_ERR_ISR);
+	ecsr_val = qm_ccsr_in(REG_ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+		if (qman_hwerr_txts[i].mask & isr_mask) {
+			dev_err_ratelimited(dev, "ErrInt: %s\n",
+					    qman_hwerr_txts[i].txt);
+			if (qman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(dev, isr_mask,
+							  ecsr_val);
+				/* Re-arm error capture registers */
+				qm_ccsr_out(REG_ECSR, ecsr_val);
+			}
+			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+				dev_dbg(dev, "Disabling error 0x%x\n",
+					qman_hwerr_txts[i].mask);
+				ier_val &= ~qman_hwerr_txts[i].mask;
+				qm_ccsr_out(REG_ERR_IER, ier_val);
+			}
+		}
+	}
+	qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+	int i, err;
+
+	/* FQD memory */
+	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	/* PFDR memory */
+	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+	if (err)
+		return err;
+	/* thresholds */
+	qm_set_pfdr_threshold(512, 64);
+	qm_set_sfdr_threshold(128);
+	/* clear stale PEBI bit from interrupt status register */
+	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+	/* corenet initiator settings */
+	qm_set_corenet_initiator();
+	/* HID settings */
+	qm_set_hid();
+	/* Set scheduling weights to defaults */
+	for (i = qm_wq_first; i <= qm_wq_last; i++)
+		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+	/* We are not prepared to accept ERNs for hardware enqueues */
+	qm_set_dc(qm_dc_portal_fman0, 1, 0);
+	qm_set_dc(qm_dc_portal_fman1, 1, 0);
+	return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
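+/*
+ * The first call captures the LIODN offset that firmware programmed for its
+ * portal; every later call rewrites the portal's LIODN to use that same
+ * offset, so all portals share one LIODN base.
+ */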
+void qman_liodn_fixup(u16 channel)
+{
+	static int done;
+	static u32 liodn_offset;
+	u32 before, after;
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+	else
+		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+	if (!done) {
+		liodn_offset = before & LIO_CFG_LIODN_MASK;
+		done = 1;
+		return;
+	}
+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+	else
+		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+	u32 before, after;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+		cpu_idx /= 2;
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+	} else {
+		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+	}
+}
+
+static int qman_resource_init(struct device *dev)
+{
+	int pool_chan_num, cgrid_num;
+	int ret, i;
+
+	switch (qman_ip_rev >> 8) {
+	case 1:
+		pool_chan_num = 15;
+		cgrid_num = 256;
+		break;
+	case 2:
+		pool_chan_num = 3;
+		cgrid_num = 64;
+		break;
+	case 3:
+		pool_chan_num = 15;
+		cgrid_num = 256;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+			   pool_chan_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+		return ret;
+	}
+
+	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+		return ret;
+	}
+
+	/* parse pool channels into the SDQCR mask */
+	for (i = 0; i < cgrid_num; i++)
+		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+	if (ret) {
+		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int qman_is_probed(void)
+{
+	return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	int ret, err_irq;
+	u16 id;
+	u8 major, minor;
+
+	__qman_probed = -1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+			node);
+		return -ENXIO;
+	}
+	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+	if (!qm_ccsr_start)
+		return -ENXIO;
+
+	qm_get_version(&id, &major, &minor);
+	if (major == 1 && minor == 0) {
+		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+		return -ENODEV;
+	} else if (major == 1 && minor == 1)
+		qman_ip_rev = QMAN_REV11;
+	else if (major == 1 && minor == 2)
+		qman_ip_rev = QMAN_REV12;
+	else if (major == 2 && minor == 0)
+		qman_ip_rev = QMAN_REV20;
+	else if (major == 3 && minor == 0)
+		qman_ip_rev = QMAN_REV30;
+	else if (major == 3 && minor == 1)
+		qman_ip_rev = QMAN_REV31;
+	else if (major == 3 && minor == 2)
+		qman_ip_rev = QMAN_REV32;
+	else {
+		dev_err(dev, "Unknown QMan version\n");
+		return -ENODEV;
+	}
+
+	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+	}
+
+	if (fqd_a) {
+#ifdef CONFIG_PPC
+		/*
+		 * For PPC backward DT compatibility,
+		 * FQD memory MUST be zeroed by software
+		 */
+		zero_priv_mem(fqd_a, fqd_sz);
+#else
+		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+	} else {
+		/*
+		 * The memory regions are assumed to be laid out as FQD
+		 * followed by PFDR; to ensure allocations come from the
+		 * correct region, the driver initializes and then allocates
+		 * each area in that order
+		 */
+		ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+	if (!pfdr_a) {
+		/* Setup PFDR memory */
+		ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+		if (ret) {
+			dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+	}
+	dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
+
+	ret = qman_init_ccsr(dev);
+	if (ret) {
+		dev_err(dev, "CCSR setup failed\n");
+		return ret;
+	}
+
+	err_irq = platform_get_irq(pdev, 0);
+	if (err_irq <= 0) {
+		dev_info(dev, "Can't get %pOF property 'interrupts'\n",
+			 node);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+			       dev);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+			ret, node);
+		return ret;
+	}
+
+	/*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init).
+	 */
+	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+	/* Enable Error Interrupts */
+	qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+	if (IS_ERR(qm_fqalloc)) {
+		ret = PTR_ERR(qm_fqalloc);
+		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+	if (IS_ERR(qm_qpalloc)) {
+		ret = PTR_ERR(qm_qpalloc);
+		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+	if (IS_ERR(qm_cgralloc)) {
+		ret = PTR_ERR(qm_cgralloc);
+		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = qman_resource_init(dev);
+	if (ret)
+		return ret;
+
+	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+	if (ret)
+		return ret;
+
+	ret = qman_wq_alloc();
+	if (ret)
+		return ret;
+
+	__qman_probed = 1;
+
+	return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+	{
+		.compatible = "fsl,qman",
+	},
+	{}
+};
+
+static struct platform_driver fsl_qman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = fsl_qman_ids,
+		.suppress_bind_attrs = true,
+	},
+	.probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000..3e9391d
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,361 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
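+/*
+ * Set up stashing for a portal: on PAMU systems, build and attach an IOMMU
+ * domain so dequeued data can be stashed into the given cpu's L1 cache, then
+ * point the portal's stash destination (SDEST) at that cpu.
+ */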
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+	struct device *dev = pcfg->dev;
+	int window_count = 1;
+	struct iommu_domain_geometry geom_attr;
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+	if (!pcfg->iommu_domain) {
+		dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+		goto no_iommu;
+	}
+	geom_attr.aperture_start = 0;
+	geom_attr.aperture_end =
+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+	geom_attr.force_aperture = true;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+				    &geom_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	stash_attr.cpu = cpu;
+	stash_attr.cache = PAMU_ATTR_CACHE_L1;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_STASH,
+				    &stash_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+					 IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+			__func__, ret);
+		goto out_domain_free;
+	}
+	ret = iommu_attach_device(pcfg->iommu_domain, dev);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+			ret);
+		goto out_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
+				    &window_count);
+	if (ret < 0) {
+		dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+			ret);
+		goto out_detach_device;
+	}
+
+no_iommu:
+#endif
+	qman_set_sdest(pcfg->channel, cpu);
+
+	return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+	iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+	iommu_domain_free(pcfg->iommu_domain);
+	pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+	struct qman_portal *p;
+	u32 irq_sources = 0;
+
+	/* We need the same LIODN offset for all portals */
+	qman_liodn_fixup(pcfg->channel);
+
+	pcfg->iommu_domain = NULL;
+	portal_set_cpu(pcfg, pcfg->cpu);
+
+	p = qman_create_affine_portal(pcfg, NULL);
+	if (!p) {
+		dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+			 __func__, pcfg->cpu);
+		return NULL;
+	}
+
+	/* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+	irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+		       QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+	irq_sources |= QM_PIRQ_DQRI;
+#endif
+	qman_p_irqsource_add(p, irq_sources);
+
+	spin_lock(&qman_lock);
+	if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+		/* all assigned portals are initialized now */
+		qman_init_cgr_all();
+	}
+
+	if (!qman_dma_portal)
+		qman_dma_portal = p;
+
+	spin_unlock(&qman_lock);
+
+	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+	return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+							unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	if (pcfg->iommu_domain) {
+		stash_attr.cpu = cpu;
+		stash_attr.cache = PAMU_ATTR_CACHE_L1;
+		ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+		if (ret < 0) {
+			dev_err(pcfg->dev,
+				"Failed to update pamu stash setting\n");
+			return;
+		}
+	}
+#endif
+	qman_set_sdest(pcfg->channel, cpu);
+}
+
+static int qman_offline_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(0));
+			qman_portal_update_sdest(pcfg, 0);
+		}
+	}
+	return 0;
+}
+
+static int qman_online_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+			qman_portal_update_sdest(pcfg, cpu);
+		}
+	}
+	return 0;
+}
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct qm_portal_config *pcfg;
+	struct resource *addr_phys[2];
+	int irq, cpu, err;
+	u32 val;
+
+	err = qman_is_probed();
+	if (!err)
+		return -EPROBE_DEFER;
+	if (err < 0) {
+		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+		return -ENODEV;
+	}
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return -ENOMEM;
+
+	pcfg->dev = dev;
+
+	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CE);
+	if (!addr_phys[0]) {
+		dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+		return -ENXIO;
+	}
+
+	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+					     DPAA_PORTAL_CI);
+	if (!addr_phys[1]) {
+		dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+		return -ENXIO;
+	}
+
+	err = of_property_read_u32(node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+		return err;
+	}
+	pcfg->channel = val;
+	pcfg->cpu = -1;
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Can't get %pOF IRQ\n", node);
+		return -ENXIO;
+	}
+	pcfg->irq = irq;
+
+	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+					resource_size(addr_phys[0]),
+					QBMAN_MEMREMAP_ATTR);
+	if (!pcfg->addr_virt_ce) {
+		dev_err(dev, "memremap::CE failed\n");
+		goto err_ioremap1;
+	}
+
+	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+				resource_size(addr_phys[1]));
+	if (!pcfg->addr_virt_ci) {
+		dev_err(dev, "ioremap::CI failed\n");
+		goto err_ioremap2;
+	}
+
+	pcfg->pools = qm_get_pools_sdqcr();
+
+	spin_lock(&qman_lock);
+	cpu = cpumask_next_zero(-1, &portal_cpus);
+	if (cpu >= nr_cpu_ids) {
+		/* unassigned portal, skip init */
+		spin_unlock(&qman_lock);
+		return 0;
+	}
+
+	cpumask_set_cpu(cpu, &portal_cpus);
+	spin_unlock(&qman_lock);
+	pcfg->cpu = cpu;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+		dev_err(dev, "dma_set_mask() failed\n");
+		goto err_portal_init;
+	}
+
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
+
+	/* clear irq affinity if assigned cpu is offline */
+	if (!cpu_online(cpu))
+		qman_offline_cpu(cpu);
+
+	return 0;
+
+err_portal_init:
+	iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+	memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+	return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+	{
+		.compatible = "fsl,qman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = qman_portal_ids,
+	},
+	.probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+	int ret;
+
+	ret = platform_driver_register(drv);
+	if (ret < 0)
+		return ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"soc/qman_portal:online",
+					qman_online_cpu, qman_offline_cpu);
+	if (ret < 0) {
+		pr_err("qman: failed to register hotplug callbacks.\n");
+		platform_driver_unregister(drv);
+		return ret;
+	}
+	return 0;
+}
+
+module_driver(qman_portal_driver,
+	      qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000..75a8f90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,267 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+	u8 verb;
+	u8 result;
+	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+	u8 __reserved[28];
+	u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+	return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+	u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+	u8 verb;
+	u8 result;
+	u8 __reserved[30];
+	/* Access this struct using qman_cgrs_get() */
+	struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+	u8 verb;
+	u8 result;
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[6];
+	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
+	__be32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u8 __reserved3[3];
+	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
+	__be32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	__be32 cscn_targ_swp[4];
+} __packed;
+
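+/* Reassemble the split 40-bit "Instant" and "Average" byte counts */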
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)
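+/*
+ * Note that CGR_BITS_PER_WORD is the shift (ilog2 of the 32-bit word width),
+ * not the word width itself. For example, CGR 27 lives in
+ * state[CGR_WORD(27)] = state[0] under mask CGR_BIT(27) = BIT(31) >> 27 =
+ * 0x00000010, i.e. CGR 0 maps to bit 31 of word 0.
+ */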
+
+struct qman_cgrs {
+	struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+	memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+				const struct qman_cgrs *src)
+{
+	*dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (i = 0; i < 8; i++)
+		*_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.state;
+	const u32 *_a = a->q.state;
+	const u32 *_b = b->q.state;
+
+	for (i = 0; i < 8; i++)
+		*_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+	/* Portal addresses */
+	void *addr_virt_ce;
+	void __iomem *addr_virt_ci;
+	struct device *dev;
+	struct iommu_domain *iommu_domain;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	/* User-visible portal configuration settings */
+	/* portal is affined to this cpu */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+	/*
+	 * the portal's dedicated channel id, used when initialising
+	 * frame queues to target this portal when scheduled
+	 */
+	u16 channel;
+	/*
+	 * mask of pool channels this portal has dequeue access to
+	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+	 */
+	u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/*   QMan s/w corenet portal, low-level i/face	 */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS	0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
+#define QM_SDQCR_COUNT_EXACT1		0x0
+#define QM_SDQCR_COUNT_UPTO3		0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_SDQCR_TYPE_MASK		0x03000000
+#define QM_SDQCR_TYPE_NULL		0x0
+#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_SDQCR_TYPE_ACTIVE		0x03000000
+#define QM_SDQCR_TOKEN_MASK		0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
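+/*
+ * Illustrative composition only (values are arbitrary): dequeue up to three
+ * frames from the dedicated channel plus pool channel 2, active-QoS type,
+ * token 0x5a:
+ *
+ *	sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ *		QM_SDQCR_TYPE_ACTIVE_QOS | QM_SDQCR_TOKEN_SET(0x5a) |
+ *		QM_SDQCR_CHANNELS_DEDICATED | QM_SDQCR_CHANNELS_POOL(2);
+ */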
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK		0x00ffffff
+#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL	0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
+#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK		0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 0000000..18f7f02
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+	int loop = 1;
+	int err = 0;
+
+	while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+		err = qman_test_stash();
+		if (err)
+			break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+		err = qman_test_api();
+		if (err)
+			break;
+#endif
+	}
+	return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 0000000..41bdbc4
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 0000000..2895d06
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,245 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID		27
+#define POOL_ID		2
+#define FQ_FLAGS	QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES	10
+#define NUM_PARTIAL	4
+#define PORTAL_SDQCR	(QM_SDQCR_SOURCE_CHANNELS | \
+			QM_SDQCR_TYPE_PRIO_QOS | \
+			QM_SDQCR_TOKEN_SET(0x98) | \
+			QM_SDQCR_CHANNELS_DEDICATED | \
+			QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE	((void *)0xf00dbeef)
+#define VDQCR_FLAGS	(QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+					struct qman_fq *,
+					const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+		   const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+		   const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+	.cb.dqrr = cb_dqrr,
+	.cb.ern = cb_ern,
+	.cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+	qm_fd_set_contig_big(fd, 0x0000ffff);
+	fd->cmd = cpu_to_be32(0xfeedf00d);
+}
+
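+/*
+ * "Increment" the fd so each enqueued frame is distinguishable from the
+ * last: mutate the 40-bit address, shrink the length by one and bump the
+ * command word.
+ */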
+static void fd_inc(struct qm_fd *fd)
+{
+	u64 t = qm_fd_addr_get64(fd);
+	int z = t >> 40;
+	unsigned int len, off;
+	enum qm_fd_format fmt;
+
+	t <<= 1;
+	if (z)
+		t |= 1;
+	qm_fd_addr_set64(fd, t);
+
+	fmt = qm_fd_get_format(fd);
+	off = qm_fd_get_offset(fd);
+	len = qm_fd_get_length(fd);
+	len--;
+	qm_fd_set_param(fd, fmt, off, len);
+
+	fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
+{
+	bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+	neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+	neq |= a->cfg != b->cfg;
+	neq |= a->cmd != b->cmd;
+
+	return neq;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+	unsigned int loop;
+	int err = 0;
+
+	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+		if (qman_enqueue(fq, &fd)) {
+			pr_crit("qman_enqueue() failed\n");
+			err = -EIO;
+		}
+		fd_inc(&fd);
+	}
+
+	return err;
+}
+
+int qman_test_api(void)
+{
+	unsigned int flags, frmcnt;
+	int err;
+	struct qman_fq *fq = &fq_base;
+
+	pr_info("%s(): Starting\n", __func__);
+	fd_init(&fd);
+	fd_init(&fd_dq);
+
+	/* Initialise (parked) FQ */
+	err = qman_create_fq(0, FQ_FLAGS, fq);
+	if (err) {
+		pr_crit("qman_create_fq() failed\n");
+		goto failed;
+	}
+	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+	if (err) {
+		pr_crit("qman_init_fq() failed\n");
+		goto failed;
+	}
+	/* Do enqueues + VDQCR, twice. (Parked FQ) */
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("VDQCR (till-empty);\n");
+	frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_crit("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_crit("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+		NUM_ENQUEUES);
+	frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+	if (err) {
+		pr_err("qman_volatile_dequeue() failed\n");
+		goto failed;
+	}
+
+	err = do_enqueues(fq);
+	if (err)
+		goto failed;
+	pr_info("scheduled dequeue (till-empty)\n");
+	err = qman_schedule_fq(fq);
+	if (err) {
+		pr_crit("qman_schedule_fq() failed\n");
+		goto failed;
+	}
+	wait_event(waitqueue, sdqcr_complete);
+
+	/* Retire and OOS the FQ */
+	err = qman_retire_fq(fq, &flags);
+	if (err < 0) {
+		pr_crit("qman_retire_fq() failed\n");
+		goto failed;
+	}
+	wait_event(waitqueue, retire_complete);
+	if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+		err = -EIO;
+		pr_crit("leaking frames\n");
+		goto failed;
+	}
+	err = qman_oos_fq(fq);
+	if (err) {
+		pr_crit("qman_oos_fq() failed\n");
+		goto failed;
+	}
+	qman_destroy_fq(fq);
+	pr_info("%s(): Finished\n", __func__);
+	return 0;
+
+failed:
+	WARN_ON(1);
+	return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dq)
+{
+	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
+		pr_err("BADNESS: dequeued frame doesn't match;\n");
+		return qman_cb_dqrr_consume;
+	}
+	fd_inc(&fd_dq);
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
+		sdqcr_complete = 1;
+		wake_up(&waitqueue);
+	}
+	return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+		   const union qm_mr_entry *msg)
+{
+	pr_crit("cb_ern() unimplemented");
+	WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+		   const union qm_mr_entry *msg)
+{
+	u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+	if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+		pr_crit("unexpected FQS message");
+		WARN_ON(1);
+		return;
+	}
+	pr_info("Retirement message received\n");
+	retire_complete = 1;
+	wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 0000000..e87b654
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,627 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * i.e. when cache expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phase 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
+ *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ *    initialisation targets the correct cpu.
+ */
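+/*
+ * Sketch of the per-hop word transform described above (the real handler
+ * code appears later in this file); 'expected_word' tracks the original
+ * LFSR sequence:
+ *
+ *	frame[i] ^= rx_mixer;			undo the sender's mixer
+ *	WARN_ON(frame[i] != expected_word);	validate against the original
+ *	frame[i] ^= tx_mixer;			mix in our own before forwarding
+ */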
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+	int (*fn)(void);
+	atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+	struct bstrap *bstrap = bs;
+	int err;
+
+	atomic_inc(&bstrap->started);
+	err = bstrap->fn();
+	if (err)
+		return err;
+	while (!kthread_should_stop())
+		msleep(20);
+	return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct bstrap bstrap = {
+			.fn = fn,
+			.started = ATOMIC_INIT(0)
+		};
+		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+			"hotpotato%d", cpu);
+		int ret;
+
+		if (IS_ERR(k))
+			return -ENOMEM;
+		kthread_bind(k, cpu);
+		wake_up_process(k);
+		/*
+		 * If we call kthread_stop() before the "wake up" has had an
+		 * effect, then the thread may exit with -EINTR without ever
+		 * running the function. So poll until it's started before
+		 * requesting it to stop.
+		 */
+		while (!atomic_read(&bstrap.started))
+			msleep(20);
+		ret = kthread_stop(k);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+struct hp_handler {
+
+	/* The following data is stashed when 'rx' is dequeued; */
+	/* -------------- */
+	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
+	struct qman_fq rx;
+	/* The Tx FQ we should forward to */
+	struct qman_fq tx;
+	/* The value we XOR post-dequeue, prior to validating */
+	u32 rx_mixer;
+	/* The value we XOR pre-enqueue, after validating */
+	u32 tx_mixer;
+	/* what the hotpotato address should be on dequeue */
+	dma_addr_t addr;
+	u32 *frame_ptr;
+
+	/* The following data isn't (necessarily) stashed on dequeue; */
+	/* -------------- */
+	u32 fqid_rx, fqid_tx;
+	/* list node for linking us into 'hp_cpu' */
+	struct list_head node;
+	/* Just to check ... */
+	unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+	/* identify the cpu we run on; */
+	unsigned int processor_id;
+	/* root node for the per-cpu list of handlers */
+	struct list_head handlers;
+	/* list node for linking us into 'hp_cpu_list' */
+	struct list_head node;
+	/*
+	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
+	 * handlers together, this is used as per-cpu iterator state
+	 */
+	struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come, first-served order. */
+static LIST_HEAD(hp_cpu_list);
+static DEFINE_SPINLOCK(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU	2
+#define HP_LOOPS	8
+/* HP_NUM_WORDS * 4 = 320 bytes of frame data, spanning several cachelines */
+#define HP_NUM_WORDS	80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD	0xabbaf00d
+
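+/*
+ * A right-shifting Galois LFSR; the taps are given by the 0xd0000001 mask.
+ * For example, do_lfsr(0xabbaf00d) == (0xabbaf00d >> 1) ^ 0xd0000001
+ * == 0x55dd7806 ^ 0xd0000001 == 0x85dd7807, since the shifted-out bit was 1.
+ */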
+static inline u32 do_lfsr(u32 prev)
+{
+	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+
+	if (!qman_dma_portal) {
+		pr_crit("portal not available\n");
+		return -EIO;
+	}
+
+	pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+	if (!__frame_ptr)
+		return -ENOMEM;
+
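+	/*
+	 * Note: PTR_ALIGN() only helps if the allocation has slack; for this
+	 * 320-byte request the slab bucket is expected to already be
+	 * cacheline aligned, so the adjustment should be a no-op.
+	 */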
+	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+		frame_ptr[loop] = lfsr;
+		lfsr = do_lfsr(lfsr);
+	}
+
+	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+				   DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(pcfg->dev, frame_dma)) {
+		pr_crit("dma mapping failure\n");
+		kfree(__frame_ptr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+			 DMA_BIDIRECTIONAL);
+	kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+				     const struct qm_fd *fd)
+{
+	u32 *p = handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+
+	if (qm_fd_addr_get64(fd) != handler->addr) {
+		pr_crit("bad frame address, [%llX != %llX]\n",
+			qm_fd_addr_get64(fd), handler->addr);
+		return -EIO;
+	}
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		*p ^= handler->rx_mixer;
+		if (*p != lfsr) {
+			pr_crit("corrupt frame data");
+			return -EIO;
+		}
+		*p ^= handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+					    struct qman_fq *fq,
+					    const struct qm_dqrr_entry *dqrr)
+{
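+	/*
+	 * The rx FQ is the first member of hp_handler, so the FQ pointer is
+	 * also the handler pointer (and dequeues stash the whole handler).
+	 */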
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	if (process_frame_data(handler, &dqrr->fd)) {
+		WARN_ON(1);
+		goto skip;
+	}
+	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+		pr_crit("qman_enqueue() failed");
+		WARN_ON(1);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+					     struct qman_fq *fq,
+					     const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	process_frame_data(handler, &dqrr->fd);
+	if (++loop_counter < HP_LOOPS) {
+		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+			pr_crit("qman_enqueue() failed");
+			WARN_ON(1);
+			goto skip;
+		}
+	} else {
+		pr_info("Received final (%dth) frame\n", loop_counter);
+		wake_up(&queue);
+	}
+skip:
+	return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+	struct hp_handler *handler;
+	int loop;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	hp_cpu->processor_id = smp_processor_id();
+	spin_lock(&hp_lock);
+	list_add_tail(&hp_cpu->node, &hp_cpu_list);
+	hp_cpu_list_length++;
+	spin_unlock(&hp_lock);
+	INIT_LIST_HEAD(&hp_cpu->handlers);
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+		if (!handler) {
+			pr_crit("kmem_cache_alloc() failed");
+			WARN_ON(1);
+			return -EIO;
+		}
+		handler->processor_id = hp_cpu->processor_id;
+		handler->addr = frame_dma;
+		handler->frame_ptr = frame_ptr;
+		list_add_tail(&handler->node, &hp_cpu->handlers);
+	}
+	return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+	struct list_head *loop, *tmp;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	spin_lock(&hp_lock);
+	list_del(&hp_cpu->node);
+	spin_unlock(&hp_lock);
+	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+		u32 flags = 0;
+		struct hp_handler *handler = list_entry(loop, struct hp_handler,
+							node);
+		if (qman_retire_fq(&handler->rx, &flags) ||
+		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+			pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+			WARN_ON(1);
+			return -EIO;
+		}
+		if (qman_oos_fq(&handler->rx)) {
+			pr_crit("qman_oos_fq(rx) failed");
+			WARN_ON(1);
+			return -EIO;
+		}
+		qman_destroy_fq(&handler->rx);
+		qman_destroy_fq(&handler->tx);
+		qman_release_fqid(handler->fqid_rx);
+		list_del(&handler->node);
+		kmem_cache_free(hp_handler_slab, handler);
+	}
+	return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+	u8 res = (offset + (L1_CACHE_BYTES - 1))
+			 / (L1_CACHE_BYTES);
+	if (res > 3)
+		return 3;
+	return res;
+}
+#define STASH_DATA_CL \
+	num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+	num_cachelines(offsetof(struct hp_handler, fqid_rx))
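+/*
+ * With 64-byte cachelines this works out to STASH_DATA_CL =
+ * min(DIV_ROUND_UP(320, 64), 3) = 3, while STASH_CTX_CL covers the
+ * hp_handler fields up to (but excluding) fqid_rx; the cap of 3 presumably
+ * matches the width of the stashing-size fields in the FQD.
+ */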
+
+static int init_handler(void *h)
+{
+	struct qm_mcc_initfq opts;
+	struct hp_handler *handler = h;
+	int err;
+
+	if (handler->processor_id != smp_processor_id()) {
+		err = -EIO;
+		goto failed;
+	}
+	/* Set up rx */
+	memset(&handler->rx, 0, sizeof(handler->rx));
+	if (handler == special_handler)
+		handler->rx.cb.dqrr = special_dqrr;
+	else
+		handler->rx.cb.dqrr = normal_dqrr;
+	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+	if (err) {
+		pr_crit("qman_create_fq(rx) failed");
+		goto failed;
+	}
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+				   QM_INITFQ_WE_CONTEXTA);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+			   QMAN_INITFQ_FLAG_LOCAL, &opts);
+	if (err) {
+		pr_crit("qman_init_fq(rx) failed");
+		goto failed;
+	}
+	/* Set up tx */
+	memset(&handler->tx, 0, sizeof(handler->tx));
+	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+			     &handler->tx);
+	if (err) {
+		pr_crit("qman_create_fq(tx) failed");
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return err;
+}
+
+static void init_handler_cb(void *h)
+{
+	if (init_handler(h))
+		WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+	int loop;
+	u32 fqid = 0;
+	u32 lfsr = 0xdeadbeef;
+	struct hp_cpu *hp_cpu;
+	struct hp_handler *handler;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			int err;
+
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			/* Rx FQID is the previous handler's Tx FQID */
+			hp_cpu->iterator->fqid_rx = fqid;
+			/* Allocate new FQID for Tx */
+			err = qman_alloc_fqid(&fqid);
+			if (err) {
+				pr_crit("qman_alloc_fqid() failed");
+				return err;
+			}
+			hp_cpu->iterator->fqid_tx = fqid;
+			/* Rx mixer is the previous handler's Tx mixer */
+			hp_cpu->iterator->rx_mixer = lfsr;
+			/* Get new mixer for Tx */
+			lfsr = do_lfsr(lfsr);
+			hp_cpu->iterator->tx_mixer = lfsr;
+		}
+	}
+	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+		return 1;
+	handler->fqid_rx = fqid;
+	handler->rx_mixer = lfsr;
+	/* and tag it as our "special" handler */
+	special_handler = handler;
+	return 0;
+}
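+
+/*
+ * Illustrative phase-2 result for 2 cpus x HP_PER_CPU == 2 (FQIDs invented):
+ *   cpu0.h0: rx=F4 (fixup) tx=F1,   cpu1.h0: rx=F1 tx=F2,
+ *   cpu0.h1: rx=F2         tx=F3,   cpu1.h1: rx=F3 tx=F4.
+ * The inner loop walks the cpu list first, so every hop changes cpu.
+ */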
+
+static int init_phase3(void)
+{
+	int loop, err;
+	struct hp_cpu *hp_cpu;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			preempt_disable();
+			if (hp_cpu->processor_id == smp_processor_id()) {
+				err = init_handler(hp_cpu->iterator);
+				if (err)
+					return err;
+			} else {
+				smp_call_function_single(hp_cpu->processor_id,
+					init_handler_cb, hp_cpu->iterator, 1);
+			}
+			preempt_enable();
+		}
+	}
+	return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+	u32 *p = special_handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop, err;
+	struct qm_fd fd;
+
+	if (special_handler->processor_id != smp_processor_id()) {
+		err = -EIO;
+		goto failed;
+	}
+	memset(&fd, 0, sizeof(fd));
+	qm_fd_addr_set64(&fd, special_handler->addr);
+	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		if (*p != lfsr) {
+			err = -EIO;
+			pr_crit("corrupt frame data");
+			goto failed;
+		}
+		*p ^= special_handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	pr_info("Sending first frame\n");
+	err = qman_enqueue(&special_handler->tx, &fd);
+	if (err) {
+		pr_crit("qman_enqueue() failed");
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+	if (send_first_frame(NULL))
+		WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+	int err;
+
+	if (cpumask_weight(cpu_online_mask) < 2) {
+		pr_info("%s(): skip - only 1 CPU\n", __func__);
+		return 0;
+	}
+
+	pr_info("%s(): Starting\n", __func__);
+
+	hp_cpu_list_length = 0;
+	loop_counter = 0;
+	hp_handler_slab = kmem_cache_create("hp_handler_slab",
+			sizeof(struct hp_handler), L1_CACHE_BYTES,
+			SLAB_HWCACHE_ALIGN, NULL);
+	if (!hp_handler_slab) {
+		err = -EIO;
+		pr_crit("kmem_cache_create() failed");
+		goto failed;
+	}
+
+	err = allocate_frame_data();
+	if (err)
+		goto failed;
+
+	/* Init phase 1 */
+	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+	if (on_all_cpus(create_per_cpu_handlers)) {
+		err = -EIO;
+		pr_crit("on_each_cpu() failed");
+		goto failed;
+	}
+	pr_info("Number of cpus: %d, total of %d handlers\n",
+		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+	err = init_phase2();
+	if (err)
+		goto failed;
+
+	err = init_phase3();
+	if (err)
+		goto failed;
+
+	preempt_disable();
+	if (special_handler->processor_id == smp_processor_id()) {
+		err = send_first_frame(NULL);
+		if (err)
+			goto failed;
+	} else {
+		smp_call_function_single(special_handler->processor_id,
+					 send_first_frame_cb, NULL, 1);
+	}
+	preempt_enable();
+
+	wait_event(queue, loop_counter == HP_LOOPS);
+	deallocate_frame_data();
+	if (on_all_cpus(destroy_per_cpu_handlers)) {
+		err = -EIO;
+		pr_crit("on_each_cpu() failed");
+		goto failed;
+	}
+	kmem_cache_destroy(hp_handler_slab);
+	pr_info("%s(): Finished\n", __func__);
+
+	return 0;
+failed:
+	WARN_ON(1);
+	return err;
+}
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644
index 0000000..fabba17
--- /dev/null
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -0,0 +1,42 @@
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+	bool "QUICC Engine (QE) framework support"
+	depends on FSL_SOC && PPC32
+	select GENERIC_ALLOCATOR
+	select CRC32
+	help
+	  The QUICC Engine (QE) is a new generation of communications
+	  coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+	  Selecting this option means that you wish to build a kernel
+	  for a machine with a QE coprocessor.
+
+config UCC_SLOW
+	bool
+	default y if SERIAL_QE
+	help
+	  This option provides qe_lib support for UCC slow
+	  protocols: UART, BISYNC, QMC
+
+config UCC_FAST
+	bool
+	default y if UCC_GETH || QE_TDM
+	help
+	  This option provides qe_lib support for UCC fast
+	  protocols: HDLC, Ethernet, ATM, transparent
+
+config UCC
+	bool
+	default y if UCC_FAST || UCC_SLOW
+
+config QE_TDM
+	bool
+	default y if FSL_UCC_HDLC
+
+config QE_USB
+	bool
+	default y if USB_FSL_QE
+	help
+	  QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644
index 0000000..55a5553
--- /dev/null
+++ b/drivers/soc/fsl/qe/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE)	+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM)	+= qe_common.o
+obj-$(CONFIG_UCC)	+= ucc.o
+obj-$(CONFIG_UCC_SLOW)	+= ucc_slow.o
+obj-$(CONFIG_UCC_FAST)	+= ucc_fast.o
+obj-$(CONFIG_QE_TDM)	+= qe_tdm.o
+obj-$(CONFIG_QE_USB)	+= usb.o
+obj-$(CONFIG_QE_GPIO)	+= gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644
index 0000000..819bed0
--- /dev/null
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -0,0 +1,342 @@
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_to_chip(); get rid of this */
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+	struct of_mm_gpio_chip mm_gc;
+	spinlock_t lock;
+
+	unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
+	/* shadowed data register to clear/set bits safely */
+	u32 cpdata;
+
+	/* saved_regs used to restore dedicated functions */
+	struct qe_pio_regs saved_regs;
+};
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+	struct qe_gpio_chip *qe_gc =
+		container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+	struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+	qe_gc->cpdata = in_be32(&regs->cpdata);
+	qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+	qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+	qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+	qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+	qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+	qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct qe_pio_regs __iomem *regs = mm_gc->regs;
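+	/* Pin 0 is the most-significant bit of the 32-bit port registers. */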
+	u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+	return !!(in_be32(&regs->cpdata) & pin_mask);
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+	struct qe_pio_regs __iomem *regs = mm_gc->regs;
+	unsigned long flags;
+	u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	if (val)
+		qe_gc->cpdata |= pin_mask;
+	else
+		qe_gc->cpdata &= ~pin_mask;
+
+	out_be32(&regs->cpdata, qe_gc->cpdata);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+				 unsigned long *mask, unsigned long *bits)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+	struct qe_pio_regs __iomem *regs = mm_gc->regs;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	for (i = 0; i < gc->ngpio; i++) {
+		if (*mask == 0)
+			break;
+		if (__test_and_clear_bit(i, mask)) {
+			if (test_bit(i, bits))
+				qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+			else
+				qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+		}
+	}
+
+	out_be32(&regs->cpdata, qe_gc->cpdata);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	__par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+	unsigned long flags;
+
+	qe_gpio_set(gc, gpio, val);
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	__par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	return 0;
+}
+
+struct qe_pin {
+	/*
+	 * The qe_gpio_chip name is unfortunate; we should change that to
+	 * something like qe_pio_controller. Someday.
+	 */
+	struct qe_gpio_chip *controller;
+	int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np:		device node to get a pin from
+ * @index:	index of a pin in the device tree
+ * Context:	non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+	struct qe_pin *qe_pin;
+	struct gpio_chip *gc;
+	struct of_mm_gpio_chip *mm_gc;
+	struct qe_gpio_chip *qe_gc;
+	int err;
+	unsigned long flags;
+
+	qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+	if (!qe_pin) {
+		pr_debug("%s: can't allocate memory\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = of_get_gpio(np, index);
+	if (err < 0)
+		goto err0;
+	gc = gpio_to_chip(err);
+	if (WARN_ON(!gc)) {
+		err = -ENODEV;
+		goto err0;
+	}
+
+	if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+		pr_debug("%s: tried to get a non-qe pin\n", __func__);
+		err = -EINVAL;
+		goto err0;
+	}
+
+	mm_gc = to_of_mm_gpio_chip(gc);
+	qe_gc = gpiochip_get_data(gc);
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
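+	/* 'err' holds the global gpio number here; make it chip-relative. */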
+	err -= gc->base;
+	if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+		qe_pin->controller = qe_gc;
+		qe_pin->num = err;
+		err = 0;
+	} else {
+		err = -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	if (!err)
+		return qe_pin;
+err0:
+	kfree(qe_pin);
+	pr_debug("%s failed with status %d\n", __func__, err);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	unsigned long flags;
+	const int pin = qe_pin->num;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+	test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+	struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+	int pin = qe_pin->num;
+	u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+	u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+	bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	if (second_reg) {
+		clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+		clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+	} else {
+		clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+		clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+	}
+
+	if (sregs->cpdata & mask1)
+		qe_gc->cpdata |= mask1;
+	else
+		qe_gc->cpdata &= ~mask1;
+
+	out_be32(&regs->cpdata, qe_gc->cpdata);
+	clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	/* Let's make it input by default, GPIO API is able to change that. */
+	__par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+		int ret;
+		struct qe_gpio_chip *qe_gc;
+		struct of_mm_gpio_chip *mm_gc;
+		struct gpio_chip *gc;
+
+		qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+		if (!qe_gc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		spin_lock_init(&qe_gc->lock);
+
+		mm_gc = &qe_gc->mm_gc;
+		gc = &mm_gc->gc;
+
+		mm_gc->save_regs = qe_gpio_save_regs;
+		gc->ngpio = QE_PIO_PINS;
+		gc->direction_input = qe_gpio_dir_in;
+		gc->direction_output = qe_gpio_dir_out;
+		gc->get = qe_gpio_get;
+		gc->set = qe_gpio_set;
+		gc->set_multiple = qe_gpio_set_multiple;
+
+		ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
+		if (ret)
+			goto err;
+		continue;
+err:
+		pr_err("%pOF: registration failed with status %d\n",
+		       np, ret);
+		kfree(qe_gc);
+		/* try others anyway */
+	}
+	return 0;
+}
+arch_initcall(qe_add_gpiochips);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644
index 0000000..2ef6fc6
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe.c
@@ -0,0 +1,736 @@
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: 	Shlomi Gridish <gridish@freescale.com>
+ * 		Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <asm/rheap.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* QE snum state */
+enum qe_snum_state {
+	QE_SNUM_STATE_USED,
+	QE_SNUM_STATE_FREE
+};
+
+/* QE snum */
+struct qe_snum {
+	u8 num;
+	enum qe_snum_state state;
+};
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static struct qe_snum snums[QE_NUM_OF_SNUM];	/* Dynamically allocated SNUMs */
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+static phys_addr_t get_qe_base(void)
+{
+	struct device_node *qe;
+	int ret;
+	struct resource res;
+
+	if (qebase != -1)
+		return qebase;
+
+	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!qe) {
+		qe = of_find_node_by_type(NULL, "qe");
+		if (!qe)
+			return qebase;
+	}
+
+	ret = of_address_to_resource(qe, 0, &res);
+	if (!ret)
+		qebase = res.start;
+	of_node_put(qe);
+
+	return qebase;
+}
+
+void qe_reset(void)
+{
+	if (qe_immr == NULL)
+		qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+	qe_snums_init();
+
+	qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+	/* Reclaim the MURAM memory for our use. */
+	qe_muram_init();
+
+	if (qe_sdma_init())
+		panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+	unsigned long flags;
+	u8 mcn_shift = 0, dev_shift = 0;
+	u32 ret;
+
+	spin_lock_irqsave(&qe_lock, flags);
+	if (cmd == QE_RESET) {
+		out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+	} else {
+		if (cmd == QE_ASSIGN_PAGE) {
+			/* Here device is the SNUM, not sub-block */
+			dev_shift = QE_CR_SNUM_SHIFT;
+		} else if (cmd == QE_ASSIGN_RISC) {
+			/* Here device is the SNUM, and mcnProtocol is
+			 * e_QeCmdRiscAssignment value */
+			dev_shift = QE_CR_SNUM_SHIFT;
+			mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+		} else {
+			if (device == QE_CR_SUBBLOCK_USB)
+				mcn_shift = QE_CR_MCN_USB_SHIFT;
+			else
+				mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+		}
+
+		out_be32(&qe_immr->cp.cecdr, cmd_input);
+		out_be32(&qe_immr->cp.cecr,
+			 (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
+			  mcn_protocol << mcn_shift));
+	}
+
+	/* wait for the QE_CR_FLG to clear */
+	ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+			   100, 0);
+	/*
+	 * On timeout (i.e. failure) the expression is false (ret == 0);
+	 * otherwise it is true (ret == 1).
+	 */
+	spin_unlock_irqrestore(&qe_lock, flags);
+
+	return ret == 1;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different block of internal
+ * memory mapped space.
+ * The BRG clock is the QE clock divided by 2.
+ * It was set up long ago during the initial boot phase and is given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+#define CLK_GRAN	(1000)
+#define CLK_GRAN_LIMIT	(5)
+
+unsigned int qe_get_brg_clk(void)
+{
+	struct device_node *qe;
+	int size;
+	const u32 *prop;
+	unsigned int mod;
+
+	if (brg_clk)
+		return brg_clk;
+
+	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!qe) {
+		qe = of_find_node_by_type(NULL, "qe");
+		if (!qe)
+			return brg_clk;
+	}
+
+	prop = of_get_property(qe, "brg-frequency", &size);
+	if (prop && size == sizeof(*prop))
+		brg_clk = *prop;
+
+	of_node_put(qe);
+
+	/* round this if near to a multiple of CLK_GRAN */
+	mod = brg_clk % CLK_GRAN;
+	if (mod) {
+		if (mod < CLK_GRAN_LIMIT)
+			brg_clk -= mod;
+		else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+			brg_clk += CLK_GRAN - mod;
+	}
+
+	return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+#define PVR_VER_836x	0x8083
+#define PVR_VER_832x	0x8084
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR].  E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+	u32 divisor, tempval;
+	u32 div16 = 0;
+
+	if ((brg < QE_BRG1) || (brg > QE_BRG16))
+		return -EINVAL;
+
+	divisor = qe_get_brg_clk() / (rate * multiplier);
+
+	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+		div16 = QE_BRGC_DIV16;
+		divisor /= 16;
+	}
+
+	/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+	   that the BRG divisor must be even if you're not using divide-by-16
+	   mode. */
+	if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+		if (!div16 && (divisor & 1) && (divisor > 3))
+			divisor++;
+
+	tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+		QE_BRGC_ENABLE | div16;
+
+	out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+
+	return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
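+
+/*
+ * Worked example (illustrative; assumes a 100 MHz BRG clock): a UART running
+ * at 115200 baud with 16x oversampling needs rate * multiplier == 1843200,
+ * so divisor = 100000000 / 1843200 = 54.  That fits without divide-by-16,
+ * and brgc is programmed with ((54 - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ * QE_BRGC_ENABLE.
+ */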
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+ */
+enum qe_clock qe_clock_source(const char *source)
+{
+	unsigned int i;
+
+	if (strcasecmp(source, "none") == 0)
+		return QE_CLK_NONE;
+
+	if (strcmp(source, "tsync_pin") == 0)
+		return QE_TSYNC_PIN;
+
+	if (strcmp(source, "rsync_pin") == 0)
+		return QE_RSYNC_PIN;
+
+	if (strncasecmp(source, "brg", 3) == 0) {
+		i = simple_strtoul(source + 3, NULL, 10);
+		if ((i >= 1) && (i <= 16))
+			return (QE_BRG1 - 1) + i;
+		else
+			return QE_CLK_DUMMY;
+	}
+
+	if (strncasecmp(source, "clk", 3) == 0) {
+		i = simple_strtoul(source + 3, NULL, 10);
+		if ((i >= 1) && (i <= 24))
+			return (QE_CLK1 - 1) + i;
+		else
+			return QE_CLK_DUMMY;
+	}
+
+	return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
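+
+/*
+ * For example, "brg5" maps to (QE_BRG1 - 1) + 5 and "clk21" maps to
+ * (QE_CLK1 - 1) + 21; any string outside the recognised ranges yields
+ * QE_CLK_DUMMY.
+ */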
+
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+	int i;
+	static const u8 snum_init_76[] = {
+		0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+		0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+		0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+		0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+		0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+		0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+		0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+		0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+		0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+		0xF4, 0xF5, 0xFC, 0xFD,
+	};
+	static const u8 snum_init_46[] = {
+		0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+		0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+		0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+		0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+		0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+		0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+	};
+	static const u8 *snum_init;
+
+	qe_num_of_snum = qe_get_num_of_snums();
+
+	if (qe_num_of_snum == 76)
+		snum_init = snum_init_76;
+	else
+		snum_init = snum_init_46;
+
+	for (i = 0; i < qe_num_of_snum; i++) {
+		snums[i].num = snum_init[i];
+		snums[i].state = QE_SNUM_STATE_FREE;
+	}
+}
+
+int qe_get_snum(void)
+{
+	unsigned long flags;
+	int snum = -EBUSY;
+	int i;
+
+	spin_lock_irqsave(&qe_lock, flags);
+	for (i = 0; i < qe_num_of_snum; i++) {
+		if (snums[i].state == QE_SNUM_STATE_FREE) {
+			snums[i].state = QE_SNUM_STATE_USED;
+			snum = snums[i].num;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qe_lock, flags);
+
+	return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+	int i;
+
+	for (i = 0; i < qe_num_of_snum; i++) {
+		if (snums[i].num == snum) {
+			snums[i].state = QE_SNUM_STATE_FREE;
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(qe_put_snum);
+
+static int qe_sdma_init(void)
+{
+	struct sdma __iomem *sdma = &qe_immr->sdma;
+	static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
+
+	if (!sdma)
+		return -ENODEV;
+
+	/* allocate 2 internal temporary buffers (512 bytes each) for
+	 * the SDMA */
+	if (IS_ERR_VALUE(sdma_buf_offset)) {
+		sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+		if (IS_ERR_VALUE(sdma_buf_offset))
+			return -ENOMEM;
+	}
+
+	out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+			       (0x1 << QE_SDMR_CEN_SHIFT)));
+
+	return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC     4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware().  It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+	const struct qe_microcode *ucode)
+{
+	const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+	unsigned int i;
+
+	if (ucode->major || ucode->minor || ucode->revision)
+		printk(KERN_INFO "qe-firmware: "
+			"uploading microcode '%s' version %u.%u.%u\n",
+			ucode->id, ucode->major, ucode->minor, ucode->revision);
+	else
+		printk(KERN_INFO "qe-firmware: "
+			"uploading microcode '%s'\n", ucode->id);
+
+	/* Use auto-increment */
+	out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+		QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+
+	for (i = 0; i < be32_to_cpu(ucode->count); i++)
+		out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+
+	/* Set I-RAM Ready Register */
+	out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated; they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+	unsigned int i;
+	unsigned int j;
+	u32 crc;
+	size_t calc_size = sizeof(struct qe_firmware);
+	size_t length;
+	const struct qe_header *hdr;
+
+	if (!firmware) {
+		printk(KERN_ERR "qe-firmware: invalid pointer\n");
+		return -EINVAL;
+	}
+
+	hdr = &firmware->header;
+	length = be32_to_cpu(hdr->length);
+
+	/* Check the magic */
+	if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+	    (hdr->magic[2] != 'F')) {
+		printk(KERN_ERR "qe-firmware: not a microcode\n");
+		return -EPERM;
+	}
+
+	/* Check the version */
+	if (hdr->version != 1) {
+		printk(KERN_ERR "qe-firmware: unsupported version\n");
+		return -EPERM;
+	}
+
+	/* Validate some of the fields */
+	if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+		printk(KERN_ERR "qe-firmware: invalid data\n");
+		return -EINVAL;
+	}
+
+	/* Validate the length and check if there's a CRC */
+	calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+
+	for (i = 0; i < firmware->count; i++)
+		/*
+		 * For situations where the second RISC uses the same microcode
+		 * as the first, the 'code_offset' and 'count' fields will be
+		 * zero, so it's okay to add those.
+		 */
+		calc_size += sizeof(__be32) *
+			be32_to_cpu(firmware->microcode[i].count);
+
+	/* Validate the length */
+	if (length != calc_size + sizeof(__be32)) {
+		printk(KERN_ERR "qe-firmware: invalid length\n");
+		return -EPERM;
+	}
+
+	/* Validate the CRC */
+	crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+	if (crc != crc32(0, firmware, calc_size)) {
+		printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+		return -EIO;
+	}
+
+	/*
+	 * If the microcode calls for it, split the I-RAM.
+	 */
+	if (!firmware->split)
+		setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+	if (firmware->soc.model)
+		printk(KERN_INFO
+			"qe-firmware: firmware '%s' for %u V%u.%u\n",
+			firmware->id, be16_to_cpu(firmware->soc.model),
+			firmware->soc.major, firmware->soc.minor);
+	else
+		printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+			firmware->id);
+
+	/*
+	 * The QE only supports one microcode per RISC, so clear out all the
+	 * saved microcode information and put in the new.
+	 */
+	memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+	strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+	qe_firmware_info.extended_modes = firmware->extended_modes;
+	memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+		sizeof(firmware->vtraps));
+
+	/* Loop through each microcode. */
+	for (i = 0; i < firmware->count; i++) {
+		const struct qe_microcode *ucode = &firmware->microcode[i];
+
+		/* Upload a microcode if it's present */
+		if (ucode->code_offset)
+			qe_upload_microcode(firmware, ucode);
+
+		/* Program the traps for this processor */
+		for (j = 0; j < 16; j++) {
+			u32 trap = be32_to_cpu(ucode->traps[j]);
+
+			if (trap)
+				out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+		}
+
+		/* Enable traps */
+		out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+	}
+
+	qe_firmware_uploaded = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
+
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+	static int initialized;
+	struct property *prop;
+	struct device_node *qe;
+	struct device_node *fw = NULL;
+	const char *sprop;
+	unsigned int i;
+
+	/*
+	 * If we haven't checked yet, and a driver hasn't uploaded a firmware
+	 * yet, then check the device tree for information.
+	 */
+	if (qe_firmware_uploaded)
+		return &qe_firmware_info;
+
+	if (initialized)
+		return NULL;
+
+	initialized = 1;
+
+	/*
+	 * Newer device trees have an "fsl,qe" compatible property for the QE
+	 * node, but we still need to support older device trees.
+	 */
+	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!qe) {
+		qe = of_find_node_by_type(NULL, "qe");
+		if (!qe)
+			return NULL;
+	}
+
+	/* Find the 'firmware' child node */
+	for_each_child_of_node(qe, fw) {
+		if (strcmp(fw->name, "firmware") == 0)
+			break;
+	}
+
+	of_node_put(qe);
+
+	/* Did we find the 'firmware' node? */
+	if (!fw)
+		return NULL;
+
+	qe_firmware_uploaded = 1;
+
+	/* Copy the data into qe_firmware_info */
+	sprop = of_get_property(fw, "id", NULL);
+	if (sprop)
+		strlcpy(qe_firmware_info.id, sprop,
+			sizeof(qe_firmware_info.id));
+
+	prop = of_find_property(fw, "extended-modes", NULL);
+	if (prop && (prop->length == sizeof(u64))) {
+		const u64 *iprop = prop->value;
+
+		qe_firmware_info.extended_modes = *iprop;
+	}
+
+	prop = of_find_property(fw, "virtual-traps", NULL);
+	if (prop && (prop->length == 32)) {
+		const u32 *iprop = prop->value;
+
+		for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
+			qe_firmware_info.vtraps[i] = iprop[i];
+	}
+
+	of_node_put(fw);
+
+	return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
+
+unsigned int qe_get_num_of_risc(void)
+{
+	struct device_node *qe;
+	int size;
+	unsigned int num_of_risc = 0;
+	const u32 *prop;
+
+	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!qe) {
+		/* Older device trees did not have an "fsl,qe"
+		 * compatible property, so we need to look for
+		 * the QE node by name.
+		 */
+		qe = of_find_node_by_type(NULL, "qe");
+		if (!qe)
+			return num_of_risc;
+	}
+
+	prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
+	if (prop && size == sizeof(*prop))
+		num_of_risc = *prop;
+
+	of_node_put(qe);
+
+	return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+	struct device_node *qe;
+	int size;
+	unsigned int num_of_snums;
+	const u32 *prop;
+
+	num_of_snums = 28; /* The default number of SNUMs is 28 */
+	qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!qe) {
+		/* Older device trees did not have an "fsl,qe"
+		 * compatible property, so we need to look for
+		 * the QE node by name.
+		 */
+		qe = of_find_node_by_type(NULL, "qe");
+		if (!qe)
+			return num_of_snums;
+	}
+
+	prop = of_get_property(qe, "fsl,qe-num-snums", &size);
+	if (prop && size == sizeof(*prop)) {
+		num_of_snums = *prop;
+		if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
+			/* No QE ever has fewer than 28 SNUMs */
+			pr_err("QE: number of snum is invalid\n");
+			of_node_put(qe);
+			return -EINVAL;
+		}
+	}
+
+	of_node_put(qe);
+
+	return num_of_snums;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!np)
+		return -ENODEV;
+	qe_reset();
+	of_node_put(np);
+	return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+	if (!qe_alive_during_sleep())
+		qe_reset();
+	return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+	return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+	{ .compatible = "fsl,qe", },
+	{ },
+};
+
+static struct platform_driver qe_driver = {
+	.driver = {
+		.name = "fsl-qe",
+		.of_match_table = qe_ids,
+	},
+	.probe = qe_probe,
+	.resume = qe_resume,
+};
+
+builtin_platform_driver(qe_driver);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644
index 0000000..104e68d
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -0,0 +1,243 @@
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static spinlock_t cpm_muram_lock;
+static u8 __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+	struct list_head head;
+	unsigned long start;
+	int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS	4
+#define GENPOOL_OFFSET		(4096 * 8)
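+/*
+ * The pool is biased by GENPOOL_OFFSET because gen_pool_alloc() returns 0 on
+ * failure while muram offset 0 is a valid allocation; the bias is added when
+ * seeding and freeing the pool and subtracted from each allocation.
+ */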
+
+int cpm_muram_init(void)
+{
+	struct device_node *np;
+	struct resource r;
+	u32 zero[OF_MAX_ADDR_CELLS] = {};
+	resource_size_t max = 0;
+	int i = 0;
+	int ret = 0;
+
+	if (muram_pbase)
+		return 0;
+
+	spin_lock_init(&cpm_muram_lock);
+	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+	if (!np) {
+		/* try legacy bindings */
+		np = of_find_node_by_name(NULL, "data-only");
+		if (!np) {
+			pr_err("Cannot find CPM muram data node");
+			ret = -ENODEV;
+			goto out_muram;
+		}
+	}
+
+	muram_pool = gen_pool_create(0, -1);
+	if (!muram_pool) {
+		pr_err("Cannot allocate memory pool for CPM/QE muram");
+		ret = -ENOMEM;
+		goto out_muram;
+	}
+	muram_pbase = of_translate_address(np, zero);
+	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+		pr_err("Cannot translate zero through CPM muram node");
+		ret = -ENODEV;
+		goto out_pool;
+	}
+
+	while (of_address_to_resource(np, i++, &r) == 0) {
+		if (r.end > max)
+			max = r.end;
+		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+				   GENPOOL_OFFSET, resource_size(&r), -1);
+		if (ret) {
+			pr_err("QE: couldn't add muram to pool!\n");
+			goto out_pool;
+		}
+	}
+
+	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+	if (!muram_vbase) {
+		pr_err("Cannot map QE muram");
+		ret = -ENOMEM;
+		goto out_pool;
+	}
+	goto out_muram;
+out_pool:
+	gen_pool_destroy(muram_pool);
+out_muram:
+	of_node_put(np);
+	return ret;
+}
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns an offset into the muram area.
+ */
+static unsigned long cpm_muram_alloc_common(unsigned long size,
+		genpool_algo_t algo, void *data)
+{
+	struct muram_block *entry;
+	unsigned long start;
+
+	if (!muram_pool && cpm_muram_init())
+		goto out2;
+
+	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+	if (!start)
+		goto out2;
+	start = start - GENPOOL_OFFSET;
+	memset_io(cpm_muram_addr(start), 0, size);
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		goto out1;
+	entry->start = start;
+	entry->size = size;
+	list_add(&entry->head, &muram_block_list);
+
+	return start;
+out1:
+	gen_pool_free(muram_pool, start, size);
+out2:
+	return (unsigned long)-ENOMEM;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+	unsigned long start;
+	unsigned long flags;
+	struct genpool_data_align muram_pool_data;
+
+	spin_lock_irqsave(&cpm_muram_lock, flags);
+	muram_pool_data.align = align;
+	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+				       &muram_pool_data);
+	spin_unlock_irqrestore(&cpm_muram_lock, flags);
+	return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+int cpm_muram_free(unsigned long offset)
+{
+	unsigned long flags;
+	int size;
+	struct muram_block *tmp;
+
+	size = 0;
+	spin_lock_irqsave(&cpm_muram_lock, flags);
+	list_for_each_entry(tmp, &muram_block_list, head) {
+		if (tmp->start == offset) {
+			size = tmp->size;
+			list_del(&tmp->head);
+			kfree(tmp);
+			break;
+		}
+	}
+	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+	spin_unlock_irqrestore(&cpm_muram_lock, flags);
+	return size;
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ * This function returns an offset into the muram area
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+	unsigned long start;
+	unsigned long flags;
+	struct genpool_data_fixed muram_pool_data_fixed;
+
+	spin_lock_irqsave(&cpm_muram_lock, flags);
+	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+				       &muram_pool_data_fixed);
+	spin_unlock_irqrestore(&cpm_muram_lock, flags);
+	return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+	return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(void __iomem *addr)
+{
+	return addr - (void __iomem *)muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
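+
+/*
+ * Note the round-trip relation: cpm_muram_addr(off) == muram_vbase + off, so
+ * cpm_muram_dma(cpm_muram_addr(off)) == muram_pbase + off, i.e. the physical
+ * address a DMA master should use for that muram offset.
+ */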
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644
index 0000000..ec2ca86
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -0,0 +1,512 @@
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe_ic.h>
+
+#include "qe_ic.h"
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+static struct qe_ic_info qe_ic_info[] = {
+	[1] = {
+	       .mask = 0x00008000,
+	       .mask_reg = QEIC_CIMR,
+	       .pri_code = 0,
+	       .pri_reg = QEIC_CIPWCC,
+	       },
+	[2] = {
+	       .mask = 0x00004000,
+	       .mask_reg = QEIC_CIMR,
+	       .pri_code = 1,
+	       .pri_reg = QEIC_CIPWCC,
+	       },
+	[3] = {
+	       .mask = 0x00002000,
+	       .mask_reg = QEIC_CIMR,
+	       .pri_code = 2,
+	       .pri_reg = QEIC_CIPWCC,
+	       },
+	[10] = {
+		.mask = 0x00000040,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 1,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[11] = {
+		.mask = 0x00000020,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 2,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[12] = {
+		.mask = 0x00000010,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 3,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[13] = {
+		.mask = 0x00000008,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 4,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[14] = {
+		.mask = 0x00000004,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 5,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[15] = {
+		.mask = 0x00000002,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 6,
+		.pri_reg = QEIC_CIPZCC,
+		},
+	[20] = {
+		.mask = 0x10000000,
+		.mask_reg = QEIC_CRIMR,
+		.pri_code = 3,
+		.pri_reg = QEIC_CIPRTA,
+		},
+	[25] = {
+		.mask = 0x00800000,
+		.mask_reg = QEIC_CRIMR,
+		.pri_code = 0,
+		.pri_reg = QEIC_CIPRTB,
+		},
+	[26] = {
+		.mask = 0x00400000,
+		.mask_reg = QEIC_CRIMR,
+		.pri_code = 1,
+		.pri_reg = QEIC_CIPRTB,
+		},
+	[27] = {
+		.mask = 0x00200000,
+		.mask_reg = QEIC_CRIMR,
+		.pri_code = 2,
+		.pri_reg = QEIC_CIPRTB,
+		},
+	[28] = {
+		.mask = 0x00100000,
+		.mask_reg = QEIC_CRIMR,
+		.pri_code = 3,
+		.pri_reg = QEIC_CIPRTB,
+		},
+	[32] = {
+		.mask = 0x80000000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 0,
+		.pri_reg = QEIC_CIPXCC,
+		},
+	[33] = {
+		.mask = 0x40000000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 1,
+		.pri_reg = QEIC_CIPXCC,
+		},
+	[34] = {
+		.mask = 0x20000000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 2,
+		.pri_reg = QEIC_CIPXCC,
+		},
+	[35] = {
+		.mask = 0x10000000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 3,
+		.pri_reg = QEIC_CIPXCC,
+		},
+	[36] = {
+		.mask = 0x08000000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 4,
+		.pri_reg = QEIC_CIPXCC,
+		},
+	[40] = {
+		.mask = 0x00800000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 0,
+		.pri_reg = QEIC_CIPYCC,
+		},
+	[41] = {
+		.mask = 0x00400000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 1,
+		.pri_reg = QEIC_CIPYCC,
+		},
+	[42] = {
+		.mask = 0x00200000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 2,
+		.pri_reg = QEIC_CIPYCC,
+		},
+	[43] = {
+		.mask = 0x00100000,
+		.mask_reg = QEIC_CIMR,
+		.pri_code = 3,
+		.pri_reg = QEIC_CIPYCC,
+		},
+};
+
+static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
+{
+	return in_be32(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
+			       u32 value)
+{
+	out_be32(base + (reg >> 2), value);
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+	return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+	return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 temp;
+
+	raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+		    temp | qe_ic_info[src].mask);
+
+	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+	struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+	unsigned int src = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 temp;
+
+	raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+		    temp & ~qe_ic_info[src].mask);
+
+	/* Flush the above write before enabling interrupts; otherwise,
+	 * spurious interrupts will sometimes happen.  To be 100% sure
+	 * that the write has reached the device before interrupts are
+	 * enabled, the mask register would have to be read back; however,
+	 * this is not required for correctness, only to avoid wasting
+	 * time on a large number of spurious interrupts.  In testing,
+	 * a sync reduced the observed spurious interrupts to zero.
+	 */
+	mb();
+
+	raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+	.name = "QEIC",
+	.irq_unmask = qe_ic_unmask_irq,
+	.irq_mask = qe_ic_mask_irq,
+	.irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+			    enum irq_domain_bus_token bus_token)
+{
+	/* Exact match, unless qe_ic node is NULL */
+	struct device_node *of_node = irq_domain_get_of_node(h);
+	return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	struct qe_ic *qe_ic = h->host_data;
+	struct irq_chip *chip;
+
+	if (hw >= ARRAY_SIZE(qe_ic_info)) {
+		pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qe_ic_info[hw].mask == 0) {
+		printk(KERN_ERR "Can't map reserved IRQ\n");
+		return -EINVAL;
+	}
+	/* Default chip */
+	chip = &qe_ic->hc_irq;
+
+	irq_set_chip_data(virq, qe_ic);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+
+	irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+	.match = qe_ic_host_match,
+	.map = qe_ic_host_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+	int irq;
+
+	BUG_ON(qe_ic == NULL);
+
+	/* get the interrupt source vector. */
+	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+	if (irq == 0)
+		return NO_IRQ;
+
+	return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+	int irq;
+
+	BUG_ON(qe_ic == NULL);
+
+	/* get the interrupt source vector. */
+	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+	if (irq == 0)
+		return NO_IRQ;
+
+	return irq_linear_revmap(qe_ic->irqhost, irq);
+}
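+
+/*
+ * Illustrative sketch (not part of this file): a board's cascade handler
+ * typically retrieves the pending QE interrupt with one of the helpers
+ * above and replays it; the qe_ic_cascade_*() inlines in
+ * <soc/fsl/qe/qe_ic.h> follow this shape.
+ *
+ *	static void example_qe_ic_cascade_low(struct irq_desc *desc)
+ *	{
+ *		struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ *		unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ *
+ *		if (cascade_irq != NO_IRQ)
+ *			generic_handle_irq(cascade_irq);
+ *	}
+ */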
+
+void __init qe_ic_init(struct device_node *node, unsigned int flags,
+		       void (*low_handler)(struct irq_desc *desc),
+		       void (*high_handler)(struct irq_desc *desc))
+{
+	struct qe_ic *qe_ic;
+	struct resource res;
+	u32 temp = 0;
+	int ret;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret)
+		return;
+
+	qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+	if (qe_ic == NULL)
+		return;
+
+	qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+					       &qe_ic_host_ops, qe_ic);
+	if (qe_ic->irqhost == NULL) {
+		kfree(qe_ic);
+		return;
+	}
+
+	qe_ic->regs = ioremap(res.start, resource_size(&res));
+	if (qe_ic->regs == NULL) {
+		irq_domain_remove(qe_ic->irqhost);
+		kfree(qe_ic);
+		return;
+	}
+
+	qe_ic->hc_irq = qe_ic_irq_chip;
+
+	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
+	qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+
+	if (qe_ic->virq_low == NO_IRQ) {
+		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
+		iounmap(qe_ic->regs);
+		irq_domain_remove(qe_ic->irqhost);
+		kfree(qe_ic);
+		return;
+	}
+
+	/*
+	 * The default priority scheme is grouped.  If spread mode is
+	 * required, configure the CICR accordingly.
+	 */
+	if (flags & QE_IC_SPREADMODE_GRP_W)
+		temp |= CICR_GWCC;
+	if (flags & QE_IC_SPREADMODE_GRP_X)
+		temp |= CICR_GXCC;
+	if (flags & QE_IC_SPREADMODE_GRP_Y)
+		temp |= CICR_GYCC;
+	if (flags & QE_IC_SPREADMODE_GRP_Z)
+		temp |= CICR_GZCC;
+	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
+		temp |= CICR_GRTA;
+	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
+		temp |= CICR_GRTB;
+
+	/* choose destination signal for highest priority interrupt */
+	if (flags & QE_IC_HIGH_SIGNAL) {
+		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
+		high_active = 1;
+	}
+
+	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+
+	irq_set_handler_data(qe_ic->virq_low, qe_ic);
+	irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+	if (qe_ic->virq_high != NO_IRQ &&
+			qe_ic->virq_high != qe_ic->virq_low) {
+		irq_set_handler_data(qe_ic->virq_high, qe_ic);
+		irq_set_chained_handler(qe_ic->virq_high, high_handler);
+	}
+}
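+
+/*
+ * Illustrative board-setup sketch (assumes a device tree node compatible
+ * with "fsl,qe-ic"; the cascade helpers come from <soc/fsl/qe/qe_ic.h>):
+ *
+ *	struct device_node *np;
+ *
+ *	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ *	if (np) {
+ *		qe_ic_init(np, 0, qe_ic_cascade_low_ipic,
+ *			   qe_ic_cascade_high_ipic);
+ *		of_node_put(np);
+ *	}
+ */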
+
+void qe_ic_set_highest_priority(unsigned int virq, int high)
+{
+	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+	unsigned int src = virq_to_hw(virq);
+	u32 temp = 0;
+
+	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
+
+	temp &= ~CICR_HP_MASK;
+	temp |= src << CICR_HP_SHIFT;
+
+	temp &= ~CICR_HPIT_MASK;
+	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
+
+	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+}
+
+/* Set Priority level within its group, from 1 to 8 */
+int qe_ic_set_priority(unsigned int virq, unsigned int priority)
+{
+	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+	unsigned int src = virq_to_hw(virq);
+	u32 temp;
+
+	if (priority > 8 || priority == 0)
+		return -EINVAL;
+	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+		      "%s: Invalid hw irq number for QEIC\n", __func__))
+		return -EINVAL;
+	if (qe_ic_info[src].pri_reg == 0)
+		return -EINVAL;
+
+	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
+
+	if (priority < 4) {
+		temp &= ~(0x7 << (32 - priority * 3));
+		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
+	} else {
+		temp &= ~(0x7 << (24 - priority * 3));
+		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+	}
+
+	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
+
+	return 0;
+}
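+
+/*
+ * Worked example for the shift arithmetic above: each priority slot is a
+ * 3-bit field, so for priority 2 the field starts at bit 32 - 2 * 3 = 26,
+ * matching CIPCC_SHIFT_PRI1 in qe_ic.h.
+ */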
+
+/* Set a QE priority to use the high IRQ; only priorities 1 and 2 can use it */
+int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
+{
+	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+	unsigned int src = virq_to_hw(virq);
+	u32 temp, control_reg = QEIC_CICNR, shift = 0;
+
+	if (priority > 2 || priority == 0)
+		return -EINVAL;
+	if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+		      "%s: Invalid hw irq number for QEIC\n", __func__))
+		return -EINVAL;
+
+	switch (qe_ic_info[src].pri_reg) {
+	case QEIC_CIPZCC:
+		shift = CICNR_ZCC1T_SHIFT;
+		break;
+	case QEIC_CIPWCC:
+		shift = CICNR_WCC1T_SHIFT;
+		break;
+	case QEIC_CIPYCC:
+		shift = CICNR_YCC1T_SHIFT;
+		break;
+	case QEIC_CIPXCC:
+		shift = CICNR_XCC1T_SHIFT;
+		break;
+	case QEIC_CIPRTA:
+		shift = CRICR_RTA1T_SHIFT;
+		control_reg = QEIC_CRICR;
+		break;
+	case QEIC_CIPRTB:
+		shift = CRICR_RTB1T_SHIFT;
+		control_reg = QEIC_CRICR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	shift += (2 - priority) * 2;
+	temp = qe_ic_read(qe_ic->regs, control_reg);
+	temp &= ~(SIGNAL_MASK << shift);
+	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
+	qe_ic_write(qe_ic->regs, control_reg, temp);
+
+	return 0;
+}
+
+static struct bus_type qe_ic_subsys = {
+	.name = "qe_ic",
+	.dev_name = "qe_ic",
+};
+
+static struct device device_qe_ic = {
+	.id = 0,
+	.bus = &qe_ic_subsys,
+};
+
+static int __init init_qe_ic_sysfs(void)
+{
+	int rc;
+
+	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
+
+	rc = subsys_system_register(&qe_ic_subsys, NULL);
+	if (rc) {
+		printk(KERN_ERR "Failed registering qe_ic sys class\n");
+		return -ENODEV;
+	}
+	rc = device_register(&device_qe_ic);
+	if (rc) {
+		printk(KERN_ERR "Failed registering qe_ic sys device\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+subsys_initcall(init_qe_ic_sysfs);
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
new file mode 100644
index 0000000..926a2ed
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/soc/fsl/qe/qe_ic.h
+ *
+ * QUICC ENGINE Interrupt Controller Header
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef _POWERPC_SYSDEV_QE_IC_H
+#define _POWERPC_SYSDEV_QE_IC_H
+
+#include <soc/fsl/qe/qe_ic.h>
+
+#define NR_QE_IC_INTS		64
+
+/* QE IC registers offset */
+#define QEIC_CICR		0x00
+#define QEIC_CIVEC		0x04
+#define QEIC_CRIPNR		0x08
+#define QEIC_CIPNR		0x0c
+#define QEIC_CIPXCC		0x10
+#define QEIC_CIPYCC		0x14
+#define QEIC_CIPWCC		0x18
+#define QEIC_CIPZCC		0x1c
+#define QEIC_CIMR		0x20
+#define QEIC_CRIMR		0x24
+#define QEIC_CICNR		0x28
+#define QEIC_CIPRTA		0x30
+#define QEIC_CIPRTB		0x34
+#define QEIC_CRICR		0x3c
+#define QEIC_CHIVEC		0x60
+
+/* Interrupt priority registers */
+#define CIPCC_SHIFT_PRI0	29
+#define CIPCC_SHIFT_PRI1	26
+#define CIPCC_SHIFT_PRI2	23
+#define CIPCC_SHIFT_PRI3	20
+#define CIPCC_SHIFT_PRI4	13
+#define CIPCC_SHIFT_PRI5	10
+#define CIPCC_SHIFT_PRI6	7
+#define CIPCC_SHIFT_PRI7	4
+
+/* CICR priority modes */
+#define CICR_GWCC		0x00040000
+#define CICR_GXCC		0x00020000
+#define CICR_GYCC		0x00010000
+#define CICR_GZCC		0x00080000
+#define CICR_GRTA		0x00200000
+#define CICR_GRTB		0x00400000
+#define CICR_HPIT_SHIFT		8
+#define CICR_HPIT_MASK		0x00000300
+#define CICR_HP_SHIFT		24
+#define CICR_HP_MASK		0x3f000000
+
+/* CICNR */
+#define CICNR_WCC1T_SHIFT	20
+#define CICNR_ZCC1T_SHIFT	28
+#define CICNR_YCC1T_SHIFT	12
+#define CICNR_XCC1T_SHIFT	4
+
+/* CRICR */
+#define CRICR_RTA1T_SHIFT	20
+#define CRICR_RTB1T_SHIFT	28
+
+/* Signal indicator */
+#define SIGNAL_MASK		3
+#define SIGNAL_HIGH		2
+#define SIGNAL_LOW		0
+
+struct qe_ic {
+	/* Control registers offset */
+	volatile u32 __iomem *regs;
+
+	/* The remapper for this QEIC */
+	struct irq_domain *irqhost;
+
+	/* The "linux" controller struct */
+	struct irq_chip hc_irq;
+
+	/* VIRQ numbers of QE high/low irqs */
+	unsigned int virq_high;
+	unsigned int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+	u32	mask;	  /* position of this source in the mask register */
+	u32	mask_reg; /* mask register (CIMR/CRIMR) offset */
+	u8	pri_code; /* for grouped interrupt sources - the interrupt
+			     code as it appears in the group priority register */
+	u32	pri_reg;  /* group priority register offset */
+};
+
+#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644
index 0000000..7ae59ab
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -0,0 +1,192 @@
+/*
+ * drivers/soc/fsl/qe/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports;
+
+int par_io_init(struct device_node *np)
+{
+	struct resource res;
+	int ret;
+	const u32 *num_ports;
+
+	/* Map Parallel I/O ports registers */
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret)
+		return ret;
+	par_io = ioremap(res.start, resource_size(&res));
+	if (!par_io)
+		return -ENOMEM;
+
+	num_ports = of_get_property(np, "num-ports", NULL);
+	if (num_ports)
+		num_par_io_ports = *num_ports;
+
+	return 0;
+}
+
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+			 int open_drain, int assignment, int has_irq)
+{
+	u32 pin_mask1bit;
+	u32 pin_mask2bits;
+	u32 new_mask2bits;
+	u32 tmp_val;
+
+	/* calculate the pin's position for the one-bit and two-bit fields */
+	pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+	/* Set open drain, if required */
+	tmp_val = in_be32(&par_io->cpodr);
+	if (open_drain)
+		out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+	else
+		out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+
+	/* define direction */
+	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+		in_be32(&par_io->cpdir2) :
+		in_be32(&par_io->cpdir1);
+
+	/* get all bits mask for 2 bit per port */
+	pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+				(pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+	/* Get the final mask we need for the right definition */
+	new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+				(pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+	/* clear and set 2 bits mask */
+	if (pin > (QE_PIO_PINS / 2) - 1) {
+		out_be32(&par_io->cpdir2,
+			 ~pin_mask2bits & tmp_val);
+		tmp_val &= ~pin_mask2bits;
+		out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+	} else {
+		out_be32(&par_io->cpdir1,
+			 ~pin_mask2bits & tmp_val);
+		tmp_val &= ~pin_mask2bits;
+		out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+	}
+	/* define pin assignment */
+	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+		in_be32(&par_io->cppar2) :
+		in_be32(&par_io->cppar1);
+
+	new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+			(pin % (QE_PIO_PINS / 2) + 1) * 2));
+	/* clear and set 2 bits mask */
+	if (pin > (QE_PIO_PINS / 2) - 1) {
+		out_be32(&par_io->cppar2,
+			 ~pin_mask2bits & tmp_val);
+		tmp_val &= ~pin_mask2bits;
+		out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+	} else {
+		out_be32(&par_io->cppar1,
+			 ~pin_mask2bits & tmp_val);
+		tmp_val &= ~pin_mask2bits;
+		out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+	}
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+		      int assignment, int has_irq)
+{
+	if (!par_io || port >= num_par_io_ports)
+		return -EINVAL;
+
+	__par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+			    has_irq);
+	return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
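+
+/*
+ * Illustrative call (values are hypothetical and board-specific): route
+ * pin 3 of port A (port 0) to a dedicated peripheral function as an
+ * output, without open drain or an interrupt:
+ *
+ *	err = par_io_config_pin(0, 3, 1, 0, 2, 0);
+ */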
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+	u32 pin_mask, tmp_val;
+
+	if (port >= num_par_io_ports)
+		return -EINVAL;
+	if (pin >= QE_PIO_PINS)
+		return -EINVAL;
+	/* calculate pin location */
+	pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+	tmp_val = in_be32(&par_io[port].cpdata);
+
+	if (val == 0)		/* clear */
+		out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+	else			/* set */
+		out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+
+	return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+	struct device_node *pio;
+	const phandle *ph;
+	int pio_map_len;
+	const unsigned int *pio_map;
+
+	if (par_io == NULL) {
+		printk(KERN_ERR "par_io not initialized\n");
+		return -1;
+	}
+
+	ph = of_get_property(np, "pio-handle", NULL);
+	if (ph == NULL) {
+		printk(KERN_ERR "pio-handle not available\n");
+		return -1;
+	}
+
+	pio = of_find_node_by_phandle(*ph);
+
+	pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+	if (pio_map == NULL) {
+		printk(KERN_ERR "pio-map is not set!\n");
+		return -1;
+	}
+	pio_map_len /= sizeof(unsigned int);
+	if ((pio_map_len % 6) != 0) {
+		printk(KERN_ERR "pio-map format wrong!\n");
+		return -1;
+	}
+
+	while (pio_map_len > 0) {
+		par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
+				(int) pio_map[2], (int) pio_map[3],
+				(int) pio_map[4], (int) pio_map[5]);
+		pio_map += 6;
+		pio_map_len -= 6;
+	}
+	of_node_put(pio);
+	return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
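+
+/*
+ * Illustrative device tree fragment (hypothetical values) matching the
+ * 6-cell pio-map format consumed above, one
+ * <port pin dir open_drain assignment has_irq> tuple per pin:
+ *
+ *	pio-map = <
+ *		0  3  1  0  2  0
+ *		1  4  2  0  1  0>;
+ */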
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 0000000..f744c21
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors:	Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+	if (strcmp(tdm_framer_type, "e1") == 0)
+		return TDM_FRAMER_E1;
+	else if (strcmp(tdm_framer_type, "t1") == 0)
+		return TDM_FRAMER_T1;
+	else
+		return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+	struct si_mode_info *si_info = &ut_info->si_info;
+
+	if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+		si_info->simr_crt = 1;
+		si_info->simr_rfsd = 0;
+	}
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+		     struct ucc_tdm_info *ut_info)
+{
+	const char *sprop;
+	int ret = 0;
+	u32 val;
+	struct resource *res;
+	struct device_node *np2;
+	static int siram_init_flag;
+	struct platform_device *pdev;
+
+	sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+	if (sprop) {
+		ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+		if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+		    (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+			pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+			return -EINVAL;
+		}
+	} else {
+		pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+		return -EINVAL;
+	}
+
+	sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+	if (sprop) {
+		ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+		if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+		    (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+			pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+			return -EINVAL;
+		}
+	} else {
+		pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+	if (ret) {
+		pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+		return -EINVAL;
+	}
+	utdm->tx_ts_mask = val;
+
+	ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+	if (ret) {
+		ret = -EINVAL;
+		pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+		return ret;
+	}
+	utdm->rx_ts_mask = val;
+
+	ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+	if (ret) {
+		ret = -EINVAL;
+		pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+		return ret;
+	}
+	utdm->tdm_port = val;
+	ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+	if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
+		utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+	else
+		utdm->tdm_mode = TDM_NORMAL;
+
+	sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+	if (!sprop) {
+		ret = -EINVAL;
+		pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+		return ret;
+	}
+	ret = set_tdm_framer(sprop);
+	if (ret < 0)
+		return -EINVAL;
+	utdm->tdm_framer_type = ret;
+
+	ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+	if (ret) {
+		ret = -EINVAL;
+		pr_err("QE-TDM: No siram entry id for UCC\n");
+		return ret;
+	}
+	utdm->siram_entry_id = val;
+
+	set_si_param(utdm, ut_info);
+
+	np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
+	if (!np2)
+		return -EINVAL;
+
+	pdev = of_find_device_by_node(np2);
+	if (!pdev) {
+		pr_err("%s: failed to lookup pdev\n", np2->name);
+		of_node_put(np2);
+		return -EINVAL;
+	}
+
+	of_node_put(np2);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(utdm->si_regs))
+		return PTR_ERR(utdm->si_regs);
+
+	np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
+	if (!np2) {
+		ret = -EINVAL;
+		goto err_miss_siram_property;
+	}
+
+	pdev = of_find_device_by_node(np2);
+	if (!pdev) {
+		ret = -EINVAL;
+		pr_err("%s: failed to lookup pdev\n", np2->name);
+		of_node_put(np2);
+		goto err_miss_siram_property;
+	}
+
+	of_node_put(np2);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	utdm->siram = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(utdm->siram)) {
+		ret = PTR_ERR(utdm->siram);
+		goto err_miss_siram_property;
+	}
+
+	if (siram_init_flag == 0) {
+		memset_io(utdm->siram, 0,  resource_size(res));
+		siram_init_flag = 1;
+	}
+
+	return ret;
+
+err_miss_siram_property:
+	devm_iounmap(&pdev->dev, utdm->si_regs);
+	return ret;
+}
+EXPORT_SYMBOL(ucc_of_parse_tdm);
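+
+/*
+ * Illustrative device tree fragment (hypothetical values) showing the
+ * properties parsed above for a UCC running in TDM mode:
+ *
+ *	fsl,rx-sync-clock = "rsync_pin";
+ *	fsl,tx-sync-clock = "tsync_pin";
+ *	fsl,tx-timeslot-mask = <0xfffffffe>;
+ *	fsl,rx-timeslot-mask = <0xfffffffe>;
+ *	fsl,tdm-id = <0>;
+ *	fsl,tdm-framer-type = "e1";
+ *	fsl,siram-entry-id = <0>;
+ */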
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+	struct si1 __iomem *si_regs;
+	u16 __iomem *siram;
+	u16 siram_entry_valid;
+	u16 siram_entry_closed;
+	u16 ucc_num;
+	u8 csel;
+	u16 sixmr;
+	u16 tdm_port;
+	u32 siram_entry_id;
+	u32 mask;
+	int i;
+
+	si_regs = utdm->si_regs;
+	siram = utdm->siram;
+	ucc_num = ut_info->uf_info.ucc_num;
+	tdm_port = utdm->tdm_port;
+	siram_entry_id = utdm->siram_entry_id;
+
+	if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+		utdm->num_of_ts = 24;
+	if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+		utdm->num_of_ts = 32;
+
+	/* set siram table */
+	csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+	siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+	siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
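+	/*
+	 * Each TDM uses a 32-entry stride in the SI RAM: TX entries live at
+	 * siram[siram_entry_id * 32 + i], and the matching RX entries start
+	 * 0x200 halfwords further on.
+	 */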
+	for (i = 0; i < utdm->num_of_ts; i++) {
+		mask = 0x01 << i;
+
+		if (utdm->tx_ts_mask & mask)
+			iowrite16be(siram_entry_valid,
+				    &siram[siram_entry_id * 32 + i]);
+		else
+			iowrite16be(siram_entry_closed,
+				    &siram[siram_entry_id * 32 + i]);
+
+		if (utdm->rx_ts_mask & mask)
+			iowrite16be(siram_entry_valid,
+				    &siram[siram_entry_id * 32 + 0x200 +  i]);
+		else
+			iowrite16be(siram_entry_closed,
+				    &siram[siram_entry_id * 32 + 0x200 +  i]);
+	}
+
+	setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+		  SIR_LAST);
+	setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+		  SIR_LAST);
+
+	/* Set SIxMR register */
+	sixmr = SIMR_SAD(siram_entry_id);
+
+	sixmr &= ~SIMR_SDM_MASK;
+
+	if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+		sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+	else
+		sixmr |= SIMR_SDM_NORMAL;
+
+	sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+			SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+	if (ut_info->si_info.simr_crt)
+		sixmr |= SIMR_CRT;
+	if (ut_info->si_info.simr_sl)
+		sixmr |= SIMR_SL;
+	if (ut_info->si_info.simr_ce)
+		sixmr |= SIMR_CE;
+	if (ut_info->si_info.simr_fe)
+		sixmr |= SIMR_FE;
+	if (ut_info->si_info.simr_gm)
+		sixmr |= SIMR_GM;
+
+	switch (tdm_port) {
+	case 0:
+		iowrite16be(sixmr, &si_regs->sixmr1[0]);
+		break;
+	case 1:
+		iowrite16be(sixmr, &si_regs->sixmr1[1]);
+		break;
+	case 2:
+		iowrite16be(sixmr, &si_regs->sixmr1[2]);
+		break;
+	case 3:
+		iowrite16be(sixmr, &si_regs->sixmr1[3]);
+		break;
+	default:
+		pr_err("QE-TDM: can not find tdm sixmr reg\n");
+		break;
+	}
+}
+EXPORT_SYMBOL(ucc_tdm_init);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644
index 0000000..681f7d4
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -0,0 +1,662 @@
+/*
+ * drivers/soc/fsl/qe/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: 	Shlomi Gridish <gridish@freescale.com>
+ * 		Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+	unsigned long flags;
+
+	if (ucc_num > UCC_MAX_NUM - 1)
+		return -EINVAL;
+
+	spin_lock_irqsave(&cmxgcr_lock, flags);
+	clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+		ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+	spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 to 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+	u8 __iomem *guemr;
+
+	/*
+	 * The GUEMR register is at the same location for both slow and fast
+	 * devices, so we just use uccX.slow.guemr.
+	 */
+	switch (ucc_num) {
+	case 0: guemr = &qe_immr->ucc1.slow.guemr;
+		break;
+	case 1: guemr = &qe_immr->ucc2.slow.guemr;
+		break;
+	case 2: guemr = &qe_immr->ucc3.slow.guemr;
+		break;
+	case 3: guemr = &qe_immr->ucc4.slow.guemr;
+		break;
+	case 4: guemr = &qe_immr->ucc5.slow.guemr;
+		break;
+	case 5: guemr = &qe_immr->ucc6.slow.guemr;
+		break;
+	case 6: guemr = &qe_immr->ucc7.slow.guemr;
+		break;
+	case 7: guemr = &qe_immr->ucc8.slow.guemr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+		UCC_GUEMR_SET_RESERVED3 | speed);
+
+	return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+	unsigned int *reg_num, unsigned int *shift)
+{
+	unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+	*reg_num = cmx + 1;
+	*cmxucr = &qe_immr->qmx.cmxucr[cmx];
+	*shift = 16 - 8 * (ucc_num & 2);
+}
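+
+/*
+ * Worked example of the mapping above: UCC1 (ucc_num 0) and UCC3
+ * (ucc_num 2) share CMXUCR1, in the upper and lower halves respectively;
+ * UCC2/UCC4 share CMXUCR3, UCC5/UCC7 share CMXUCR2, and UCC6/UCC8 share
+ * CMXUCR4.
+ */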
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+	__be32 __iomem *cmxucr;
+	unsigned int reg_num;
+	unsigned int shift;
+
+	/* check if the UCC number is in range. */
+	if (ucc_num > UCC_MAX_NUM - 1)
+		return -EINVAL;
+
+	get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+	if (set)
+		setbits32(cmxucr, mask << shift);
+	else
+		clrbits32(cmxucr, mask << shift);
+
+	return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+	enum comm_dir mode)
+{
+	__be32 __iomem *cmxucr;
+	unsigned int reg_num;
+	unsigned int shift;
+	u32 clock_bits = 0;
+
+	/* check if the UCC number is in range. */
+	if (ucc_num > UCC_MAX_NUM - 1)
+		return -EINVAL;
+
+	/* The communications direction must be RX or TX */
+	if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+		return -EINVAL;
+
+	get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+	switch (reg_num) {
+	case 1:
+		switch (clock) {
+		case QE_BRG1:	clock_bits = 1; break;
+		case QE_BRG2:	clock_bits = 2; break;
+		case QE_BRG7:	clock_bits = 3; break;
+		case QE_BRG8:	clock_bits = 4; break;
+		case QE_CLK9:	clock_bits = 5; break;
+		case QE_CLK10:	clock_bits = 6; break;
+		case QE_CLK11:	clock_bits = 7; break;
+		case QE_CLK12:	clock_bits = 8; break;
+		case QE_CLK15:	clock_bits = 9; break;
+		case QE_CLK16:	clock_bits = 10; break;
+		default: break;
+		}
+		break;
+	case 2:
+		switch (clock) {
+		case QE_BRG5:	clock_bits = 1; break;
+		case QE_BRG6:	clock_bits = 2; break;
+		case QE_BRG7:	clock_bits = 3; break;
+		case QE_BRG8:	clock_bits = 4; break;
+		case QE_CLK13:	clock_bits = 5; break;
+		case QE_CLK14:	clock_bits = 6; break;
+		case QE_CLK19:	clock_bits = 7; break;
+		case QE_CLK20:	clock_bits = 8; break;
+		case QE_CLK15:	clock_bits = 9; break;
+		case QE_CLK16:	clock_bits = 10; break;
+		default: break;
+		}
+		break;
+	case 3:
+		switch (clock) {
+		case QE_BRG9:	clock_bits = 1; break;
+		case QE_BRG10:	clock_bits = 2; break;
+		case QE_BRG15:	clock_bits = 3; break;
+		case QE_BRG16:	clock_bits = 4; break;
+		case QE_CLK3:	clock_bits = 5; break;
+		case QE_CLK4:	clock_bits = 6; break;
+		case QE_CLK17:	clock_bits = 7; break;
+		case QE_CLK18:	clock_bits = 8; break;
+		case QE_CLK7:	clock_bits = 9; break;
+		case QE_CLK8:	clock_bits = 10; break;
+		case QE_CLK16:	clock_bits = 11; break;
+		default: break;
+		}
+		break;
+	case 4:
+		switch (clock) {
+		case QE_BRG13:	clock_bits = 1; break;
+		case QE_BRG14:	clock_bits = 2; break;
+		case QE_BRG15:	clock_bits = 3; break;
+		case QE_BRG16:	clock_bits = 4; break;
+		case QE_CLK5:	clock_bits = 5; break;
+		case QE_CLK6:	clock_bits = 6; break;
+		case QE_CLK21:	clock_bits = 7; break;
+		case QE_CLK22:	clock_bits = 8; break;
+		case QE_CLK7:	clock_bits = 9; break;
+		case QE_CLK8:	clock_bits = 10; break;
+		case QE_CLK16:	clock_bits = 11; break;
+		default: break;
+		}
+		break;
+	default: break;
+	}
+
+	/* Check for invalid combination of clock and UCC number */
+	if (!clock_bits)
+		return -ENOENT;
+
+	if (mode == COMM_DIR_RX)
+		shift += 4;
+
+	clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+		clock_bits << shift);
+
+	return 0;
+}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+	int clock_bits = -EINVAL;
+
+	/*
+	 * For TDM[0..3], TX and RX share a common clock source,
+	 * selectable from BRG3, BRG4, CLK1 and CLK2.
+	 * For TDM[4..7], TX and RX share a common clock source,
+	 * selectable from BRG12, BRG13, CLK23 and CLK24.
+	 */
+	switch (tdm_num) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		switch (clock) {
+		case QE_BRG3:
+			clock_bits = 1;
+			break;
+		case QE_BRG4:
+			clock_bits = 2;
+			break;
+		case QE_CLK1:
+			clock_bits = 4;
+			break;
+		case QE_CLK2:
+			clock_bits = 5;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+		switch (clock) {
+		case QE_BRG12:
+			clock_bits = 1;
+			break;
+		case QE_BRG13:
+			clock_bits = 2;
+			break;
+		case QE_CLK23:
+			clock_bits = 4;
+			break;
+		case QE_CLK24:
+			clock_bits = 5;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+	int clock_bits = -EINVAL;
+
+	switch (tdm_num) {
+	case 0:
+		switch (clock) {
+		case QE_CLK3:
+			clock_bits = 6;
+			break;
+		case QE_CLK8:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1:
+		switch (clock) {
+		case QE_CLK5:
+			clock_bits = 6;
+			break;
+		case QE_CLK10:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 2:
+		switch (clock) {
+		case QE_CLK7:
+			clock_bits = 6;
+			break;
+		case QE_CLK12:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 3:
+		switch (clock) {
+		case QE_CLK9:
+			clock_bits = 6;
+			break;
+		case QE_CLK14:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 4:
+		switch (clock) {
+		case QE_CLK11:
+			clock_bits = 6;
+			break;
+		case QE_CLK16:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 5:
+		switch (clock) {
+		case QE_CLK13:
+			clock_bits = 6;
+			break;
+		case QE_CLK18:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 6:
+		switch (clock) {
+		case QE_CLK15:
+			clock_bits = 6;
+			break;
+		case QE_CLK20:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 7:
+		switch (clock) {
+		case QE_CLK17:
+			clock_bits = 6;
+			break;
+		case QE_CLK22:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+	int clock_bits = -EINVAL;
+
+	switch (tdm_num) {
+	case 0:
+		switch (clock) {
+		case QE_CLK4:
+			clock_bits = 6;
+			break;
+		case QE_CLK9:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1:
+		switch (clock) {
+		case QE_CLK6:
+			clock_bits = 6;
+			break;
+		case QE_CLK11:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 2:
+		switch (clock) {
+		case QE_CLK8:
+			clock_bits = 6;
+			break;
+		case QE_CLK13:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 3:
+		switch (clock) {
+		case QE_CLK10:
+			clock_bits = 6;
+			break;
+		case QE_CLK15:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 4:
+		switch (clock) {
+		case QE_CLK12:
+			clock_bits = 6;
+			break;
+		case QE_CLK17:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 5:
+		switch (clock) {
+		case QE_CLK14:
+			clock_bits = 6;
+			break;
+		case QE_CLK19:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 6:
+		switch (clock) {
+		case QE_CLK16:
+			clock_bits = 6;
+			break;
+		case QE_CLK21:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 7:
+		switch (clock) {
+		case QE_CLK18:
+			clock_bits = 6;
+			break;
+		case QE_CLK3:
+			clock_bits = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return clock_bits;
+}
+
+/* tdm_num: TDM ports A-H are numbered 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+				enum qe_clock clock)
+{
+	int clock_bits;
+
+	clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+	if (clock_bits > 0)
+		return clock_bits;
+	if (mode == COMM_DIR_RX)
+		clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+	if (mode == COMM_DIR_TX)
+		clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+	return clock_bits;
+}
+
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+	u32 shift;
+
+	shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+	if (tdm_num < 4)
+		shift -= tdm_num * 4;
+	else
+		shift -= (tdm_num - 4) * 4;
+
+	return shift;
+}
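+
+/*
+ * Worked example: the low and high mux registers each cover four TDMs,
+ * four bits per TDM, counting down from the base; for RX on TDM1 the
+ * field sits at shift 28 - 1 * 4 = 24 of CMXSI1CR_L.
+ */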
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+			 enum comm_dir mode)
+{
+	int clock_bits;
+	u32 shift;
+	struct qe_mux __iomem *qe_mux_reg;
+	__be32 __iomem *cmxs1cr;
+
+	qe_mux_reg = &qe_immr->qmx;
+
+	if (tdm_num > 7)
+		return -EINVAL;
+
+	/* The communications direction must be RX or TX */
+	if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+		return -EINVAL;
+
+	clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+	if (clock_bits < 0)
+		return -EINVAL;
+
+	shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+	cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+				  &qe_mux_reg->cmxsi1cr_h;
+
+	qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			clock_bits << shift);
+
+	return 0;
+}
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+				   enum comm_dir mode)
+{
+	int source = -EINVAL;
+
+	if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+		source = 0;
+		return source;
+	}
+	if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+		source = 0;
+		return source;
+	}
+
+	switch (tdm_num) {
+	case 0:
+	case 1:
+		switch (clock) {
+		case QE_BRG9:
+			source = 1;
+			break;
+		case QE_BRG10:
+			source = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 2:
+	case 3:
+		switch (clock) {
+		case QE_BRG9:
+			source = 1;
+			break;
+		case QE_BRG11:
+			source = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 4:
+	case 5:
+		switch (clock) {
+		case QE_BRG13:
+			source = 1;
+			break;
+		case QE_BRG14:
+			source = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 6:
+	case 7:
+		switch (clock) {
+		case QE_BRG13:
+			source = 1;
+			break;
+		case QE_BRG15:
+			source = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+	u32 shift;
+
+	shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+	shift -= tdm_num * 2;
+
+	return shift;
+}
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+			  enum comm_dir mode)
+{
+	int source;
+	u32 shift;
+	struct qe_mux __iomem *qe_mux_reg;
+
+	qe_mux_reg = &qe_immr->qmx;
+
+	if (tdm_num >= UCC_TDM_NUM)
+		return -EINVAL;
+
+	/* The communications direction must be RX or TX */
+	if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+		return -EINVAL;
+
+	source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+	if (source < 0)
+		return -EINVAL;
+
+	shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+	qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
+			QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+			source << shift);
+
+	return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644
index 0000000..83d8d16
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: 	Shlomi Gridish <gridish@freescale.com>
+ * 		Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private *uccf)
+{
+	printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+	printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+	printk(KERN_INFO "gumr  : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+	printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+	printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+	printk(KERN_INFO "udsr  : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+	printk(KERN_INFO "ucce  : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+	printk(KERN_INFO "uccm  : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+	printk(KERN_INFO "uccs  : addr=0x%p, val=0x%02x\n",
+		  &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+	printk(KERN_INFO "urfb  : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+	printk(KERN_INFO "urfs  : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+	printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+	printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+	printk(KERN_INFO "utfb  : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+	printk(KERN_INFO "utfs  : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+	printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+	printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+	printk(KERN_INFO "utpt  : addr=0x%p, val=0x%04x\n",
+		  &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+	printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+		  &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+	printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+		  &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+	switch (uccf_num) {
+	case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+	case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+	case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+	case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+	case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+	case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+	case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+	case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+	default: return QE_CR_SUBBLOCK_INVALID;
+	}
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
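+
+/*
+ * Illustrative use of the subblock lookup (mirrors what UCC drivers such
+ * as ucc_geth do when issuing QE commands against a fast UCC):
+ *
+ *	u32 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ *
+ *	qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ *		     QE_CR_PROTOCOL_ETHERNET, 0);
+ */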
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private *uccf)
+{
+	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+	struct ucc_fast __iomem *uf_regs;
+	u32 gumr;
+
+	uf_regs = uccf->uf_regs;
+
+	/* Enable reception and/or transmission on this UCC. */
+	gumr = in_be32(&uf_regs->gumr);
+	if (mode & COMM_DIR_TX) {
+		gumr |= UCC_FAST_GUMR_ENT;
+		uccf->enabled_tx = 1;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr |= UCC_FAST_GUMR_ENR;
+		uccf->enabled_rx = 1;
+	}
+	out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+	struct ucc_fast __iomem *uf_regs;
+	u32 gumr;
+
+	uf_regs = uccf->uf_regs;
+
+	/* Disable reception and/or transmission on this UCC. */
+	gumr = in_be32(&uf_regs->gumr);
+	if (mode & COMM_DIR_TX) {
+		gumr &= ~UCC_FAST_GUMR_ENT;
+		uccf->enabled_tx = 0;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr &= ~UCC_FAST_GUMR_ENR;
+		uccf->enabled_rx = 0;
+	}
+	out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info *uf_info,
+		  struct ucc_fast_private **uccf_ret)
+{
+	struct ucc_fast_private *uccf;
+	struct ucc_fast __iomem *uf_regs;
+	u32 gumr;
+	int ret;
+
+	if (!uf_info)
+		return -EINVAL;
+
+	/* check if the UCC port number is in range. */
+	if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check that 'max_rx_buf_length' is properly aligned (4). */
+	if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Validate Virtual Fifo register values */
+	if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+		printk(KERN_ERR "%s: urfs is too small\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+		printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+	if (!uccf) {
+		printk(KERN_ERR "%s: Cannot allocate private data\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Fill fast UCC structure */
+	uccf->uf_info = uf_info;
+	/* Set the PHY base address */
+	uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+	if (uccf->uf_regs == NULL) {
+		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+		kfree(uccf);
+		return -ENOMEM;
+	}
+
+	uccf->enabled_tx = 0;
+	uccf->enabled_rx = 0;
+	uccf->stopped_tx = 0;
+	uccf->stopped_rx = 0;
+	uf_regs = uccf->uf_regs;
+	uccf->p_ucce = &uf_regs->ucce;
+	uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+	uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+	uccf->tx_frames = 0;
+	uccf->rx_frames = 0;
+	uccf->rx_discarded = 0;
+#endif				/* STATISTICS */
+
+	/* Set UCC to fast type */
+	ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+	if (ret) {
+		printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+		ucc_fast_free(uccf);
+		return ret;
+	}
+
+	uccf->mrblr = uf_info->max_rx_buf_length;
+
+	/* Set GUMR */
+	/* For more details see the hardware spec. */
+	gumr = uf_info->ttx_trx;
+	if (uf_info->tci)
+		gumr |= UCC_FAST_GUMR_TCI;
+	if (uf_info->cdp)
+		gumr |= UCC_FAST_GUMR_CDP;
+	if (uf_info->ctsp)
+		gumr |= UCC_FAST_GUMR_CTSP;
+	if (uf_info->cds)
+		gumr |= UCC_FAST_GUMR_CDS;
+	if (uf_info->ctss)
+		gumr |= UCC_FAST_GUMR_CTSS;
+	if (uf_info->txsy)
+		gumr |= UCC_FAST_GUMR_TXSY;
+	if (uf_info->rsyn)
+		gumr |= UCC_FAST_GUMR_RSYN;
+	gumr |= uf_info->synl;
+	if (uf_info->rtsm)
+		gumr |= UCC_FAST_GUMR_RTSM;
+	gumr |= uf_info->renc;
+	if (uf_info->revd)
+		gumr |= UCC_FAST_GUMR_REVD;
+	gumr |= uf_info->tenc;
+	gumr |= uf_info->tcrc;
+	gumr |= uf_info->mode;
+	out_be32(&uf_regs->gumr, gumr);
+
+	/* Allocate memory for Tx Virtual Fifo */
+	uccf->ucc_fast_tx_virtual_fifo_base_offset =
+	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+			__func__);
+		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
+		ucc_fast_free(uccf);
+		return -ENOMEM;
+	}
+
+	/* Allocate memory for Rx Virtual Fifo */
+	uccf->ucc_fast_rx_virtual_fifo_base_offset =
+		qe_muram_alloc(uf_info->urfs +
+			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+			__func__);
+		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
+		ucc_fast_free(uccf);
+		return -ENOMEM;
+	}
+
+	/* Set Virtual Fifo registers */
+	out_be16(&uf_regs->urfs, uf_info->urfs);
+	out_be16(&uf_regs->urfet, uf_info->urfet);
+	out_be16(&uf_regs->urfset, uf_info->urfset);
+	out_be16(&uf_regs->utfs, uf_info->utfs);
+	out_be16(&uf_regs->utfet, uf_info->utfet);
+	out_be16(&uf_regs->utftt, uf_info->utftt);
+	/* utfb, urfb are offsets from MURAM base */
+	out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
+	out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+	/* Mux clocking */
+	/* Grant Support */
+	ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+	/* Breakpoint Support */
+	ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+	/* Set Tsa or NMSI mode. */
+	ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+	/* If NMSI (not Tsa), set Tx and Rx clock. */
+	if (!uf_info->tsa) {
+		/* Rx clock routing */
+		if ((uf_info->rx_clock != QE_CLK_NONE) &&
+		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+					COMM_DIR_RX)) {
+			printk(KERN_ERR "%s: illegal value for RX clock\n",
+			       __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+		/* Tx clock routing */
+		if ((uf_info->tx_clock != QE_CLK_NONE) &&
+		    ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+					COMM_DIR_TX)) {
+			printk(KERN_ERR "%s: illegal value for TX clock\n",
+			       __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+	} else {
+		/* tdm Rx clock routing */
+		if ((uf_info->rx_clock != QE_CLK_NONE) &&
+		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+					 COMM_DIR_RX)) {
+			pr_err("%s: illegal value for RX clock", __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+
+		/* tdm Tx clock routing */
+		if ((uf_info->tx_clock != QE_CLK_NONE) &&
+		    ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+					 COMM_DIR_TX)) {
+			pr_err("%s: illegal value for TX clock", __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+
+		/* tdm Rx sync clock routing */
+		if ((uf_info->rx_sync != QE_CLK_NONE) &&
+		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+					  COMM_DIR_RX)) {
+			pr_err("%s: illegal value for RX clock", __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+
+		/* tdm Tx sync clock routing */
+		if ((uf_info->tx_sync != QE_CLK_NONE) &&
+		    ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+					  COMM_DIR_TX)) {
+			pr_err("%s: illegal value for TX clock", __func__);
+			ucc_fast_free(uccf);
+			return -EINVAL;
+		}
+	}
+
+	/* Set interrupt mask register at UCC level. */
+	out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+
+	/*
+	 * First, clear anything pending at UCC level, otherwise old
+	 * garbage may come through as soon as the dam is opened.
+	 */
+
+	/* Writing '1' clears */
+	out_be32(&uf_regs->ucce, 0xffffffff);
+
+	*uccf_ret = uccf;
+	return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
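+
+/*
+ * Illustrative caller sketch (field values are hypothetical; a real user
+ * such as ucc_geth fills in many more ucc_fast_info fields):
+ *
+ *	struct ucc_fast_private *uccf;
+ *
+ *	uf_info->ucc_num = 0;
+ *	uf_info->max_rx_buf_length = 1536;
+ *	if (ucc_fast_init(uf_info, &uccf))
+ *		return -ENODEV;
+ *	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
+ */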
+
+void ucc_fast_free(struct ucc_fast_private *uccf)
+{
+	if (!uccf)
+		return;
+
+	if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
+		qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+
+	if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
+		qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+	if (uccf->uf_regs)
+		iounmap(uccf->uf_regs);
+
+	kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644
index 0000000..9334bdb
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: 	Shlomi Gridish <gridish@freescale.com>
+ * 		Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+	switch (uccs_num) {
+	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+	default: return QE_CR_SUBBLOCK_INVALID;
+	}
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
+{
+	struct ucc_slow_info *us_info = uccs->us_info;
+	u32 id;
+
+	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+			 QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
+{
+	struct ucc_slow_info *us_info = uccs->us_info;
+	u32 id;
+
+	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
+{
+	struct ucc_slow_info *us_info = uccs->us_info;
+	u32 id;
+
+	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
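+
+/*
+ * Illustrative sequence: a protocol driver that needs to reprogram its
+ * transmit parameters typically brackets the change with the helpers
+ * above:
+ *
+ *	ucc_slow_graceful_stop_tx(uccs);
+ *	... reprogram the transmit parameter RAM ...
+ *	ucc_slow_restart_tx(uccs);
+ */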
+
+void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+	struct ucc_slow __iomem *us_regs;
+	u32 gumr_l;
+
+	us_regs = uccs->us_regs;
+
+	/* Enable reception and/or transmission on this UCC. */
+	gumr_l = in_be32(&us_regs->gumr_l);
+	if (mode & COMM_DIR_TX) {
+		gumr_l |= UCC_SLOW_GUMR_L_ENT;
+		uccs->enabled_tx = 1;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr_l |= UCC_SLOW_GUMR_L_ENR;
+		uccs->enabled_rx = 1;
+	}
+	out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+	struct ucc_slow __iomem *us_regs;
+	u32 gumr_l;
+
+	us_regs = uccs->us_regs;
+
+	/* Disable reception and/or transmission on this UCC. */
+	gumr_l = in_be32(&us_regs->gumr_l);
+	if (mode & COMM_DIR_TX) {
+		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+		uccs->enabled_tx = 0;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+		uccs->enabled_rx = 0;
+	}
+	out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/*
+ * Initialize the UCC for slow operation.
+ *
+ * The caller must fill in the relevant us_info fields before calling this.
+ */
+int ucc_slow_init(struct ucc_slow_info *us_info,
+		  struct ucc_slow_private **uccs_ret)
+{
+	struct ucc_slow_private *uccs;
+	u32 i;
+	struct ucc_slow __iomem *us_regs;
+	u32 gumr;
+	struct qe_bd *bd;
+	u32 id;
+	u32 command;
+	int ret = 0;
+
+	if (!us_info)
+		return -EINVAL;
+
+	/* check if the UCC port number is in range. */
+	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+		printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Set mrblr
+	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
+	 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
+	 * case when QE accepts 32 bits at a time.
+	 */
+	if ((!us_info->rfw) &&
+		(us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+		printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+		return -EINVAL;
+	}
+
+	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+	if (!uccs) {
+		printk(KERN_ERR "%s: Cannot allocate private data\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Fill slow UCC structure */
+	uccs->us_info = us_info;
+	/* Map the UCC register block */
+	uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+	if (uccs->us_regs == NULL) {
+		printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+		kfree(uccs);
+		return -ENOMEM;
+	}
+
+	uccs->saved_uccm = 0;
+	uccs->p_rx_frame = 0;
+	us_regs = uccs->us_regs;
+	uccs->p_ucce = (u16 *) & (us_regs->ucce);
+	uccs->p_uccm = (u16 *) & (us_regs->uccm);
+#ifdef STATISTICS
+	uccs->rx_frames = 0;
+	uccs->tx_frames = 0;
+	uccs->rx_discarded = 0;
+#endif				/* STATISTICS */
+
+	/* Get PRAM base */
+	uccs->us_pram_offset =
+		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+		ucc_slow_free(uccs);
+		return -ENOMEM;
+	}
+	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+		     uccs->us_pram_offset);
+
+	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+	/* Set UCC to slow type */
+	ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+	if (ret) {
+		printk(KERN_ERR "%s: cannot set UCC type", __func__);
+		ucc_slow_free(uccs);
+		return ret;
+	}
+
+	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+
+	INIT_LIST_HEAD(&uccs->confQ);
+
+	/* Allocate BDs. */
+	uccs->rx_base_offset =
+		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+				QE_ALIGNMENT_OF_BD);
+	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+		printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+			us_info->rx_bd_ring_len);
+		uccs->rx_base_offset = 0;
+		ucc_slow_free(uccs);
+		return -ENOMEM;
+	}
+
+	uccs->tx_base_offset =
+		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+			QE_ALIGNMENT_OF_BD);
+	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+		printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+		uccs->tx_base_offset = 0;
+		ucc_slow_free(uccs);
+		return -ENOMEM;
+	}
+
+	/* Init Tx bds */
+	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+		/* clear bd buffer */
+		out_be32(&bd->buf, 0);
+		/* set bd status and length */
+		out_be32((u32 *)bd, 0);
+		bd++;
+	}
+	/* for last BD set Wrap bit */
+	out_be32(&bd->buf, 0);
+	out_be32((u32 *)bd, T_W);	/* out_be32() already converts to BE */
+
+	/* Init Rx bds */
+	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+		/* set bd status and length */
+		out_be32((u32 *)bd, 0);
+		/* clear bd buffer */
+		out_be32(&bd->buf, 0);
+		bd++;
+	}
+	/* for last BD set Wrap bit */
+	out_be32((u32 *)bd, R_W);	/* out_be32() already converts to BE */
+	out_be32(&bd->buf, 0);
+
+	/* Set GUMR (For more details see the hardware spec.). */
+	/* gumr_h */
+	gumr = us_info->tcrc;
+	if (us_info->cdp)
+		gumr |= UCC_SLOW_GUMR_H_CDP;
+	if (us_info->ctsp)
+		gumr |= UCC_SLOW_GUMR_H_CTSP;
+	if (us_info->cds)
+		gumr |= UCC_SLOW_GUMR_H_CDS;
+	if (us_info->ctss)
+		gumr |= UCC_SLOW_GUMR_H_CTSS;
+	if (us_info->tfl)
+		gumr |= UCC_SLOW_GUMR_H_TFL;
+	if (us_info->rfw)
+		gumr |= UCC_SLOW_GUMR_H_RFW;
+	if (us_info->txsy)
+		gumr |= UCC_SLOW_GUMR_H_TXSY;
+	if (us_info->rtsm)
+		gumr |= UCC_SLOW_GUMR_H_RTSM;
+	out_be32(&us_regs->gumr_h, gumr);
+
+	/* gumr_l */
+	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
+		us_info->diag | us_info->mode;
+	if (us_info->tci)
+		gumr |= UCC_SLOW_GUMR_L_TCI;
+	if (us_info->rinv)
+		gumr |= UCC_SLOW_GUMR_L_RINV;
+	if (us_info->tinv)
+		gumr |= UCC_SLOW_GUMR_L_TINV;
+	if (us_info->tend)
+		gumr |= UCC_SLOW_GUMR_L_TEND;
+	out_be32(&us_regs->gumr_l, gumr);
+
+	/* Function code registers */
+
+	/* if the data is in cachable memory, the 'global' */
+	/* in the function code should be set. */
+	uccs->us_pram->tbmr = UCC_BMR_BO_BE;
+	uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+
+	/* rbase, tbase are offsets from MURAM base */
+	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
+	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+
+	/* Mux clocking */
+	/* Grant Support */
+	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+	/* Breakpoint Support */
+	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+	/* Set Tsa or NMSI mode. */
+	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+	/* If NMSI (not Tsa), set Tx and Rx clock. */
+	if (!us_info->tsa) {
+		/* Rx clock routing */
+		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+					COMM_DIR_RX)) {
+			printk(KERN_ERR "%s: illegal value for RX clock\n",
+			       __func__);
+			ucc_slow_free(uccs);
+			return -EINVAL;
+		}
+		/* Tx clock routing */
+		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+					COMM_DIR_TX)) {
+			printk(KERN_ERR "%s: illegal value for TX clock\n",
+			       __func__);
+			ucc_slow_free(uccs);
+			return -EINVAL;
+		}
+	}
+
+	/* Set interrupt mask register at UCC level. */
+	out_be16(&us_regs->uccm, us_info->uccm_mask);
+
+	/*
+	 * First, clear anything pending at UCC level, otherwise old
+	 * garbage may come through as soon as the dam is opened.
+	 */
+
+	/* Writing '1' clears */
+	out_be16(&us_regs->ucce, 0xffff);
+
+	/* Issue QE Init command */
+	if (us_info->init_tx && us_info->init_rx)
+		command = QE_INIT_TX_RX;
+	else if (us_info->init_tx)
+		command = QE_INIT_TX;
+	else
+		command = QE_INIT_RX;	/* We know at least one is TRUE */
+
+	qe_issue_cmd(command, id, us_info->protocol, 0);
+
+	*uccs_ret = uccs;
+	return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private *uccs)
+{
+	if (!uccs)
+		return;
+
+	if (uccs->rx_base_offset)
+		qe_muram_free(uccs->rx_base_offset);
+
+	if (uccs->tx_base_offset)
+		qe_muram_free(uccs->tx_base_offset);
+
+	if (uccs->us_pram)
+		qe_muram_free(uccs->us_pram_offset);
+
+	if (uccs->us_regs)
+		iounmap(uccs->us_regs);
+
+	kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
+
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644
index 0000000..111f7ab
--- /dev/null
+++ b/drivers/soc/fsl/qe/usb.c
@@ -0,0 +1,56 @@
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+	struct qe_mux __iomem *mux = &qe_immr->qmx;
+	unsigned long flags;
+	u32 val;
+
+	switch (clk) {
+	case QE_CLK3:  val = QE_CMXGCR_USBCS_CLK3;  break;
+	case QE_CLK5:  val = QE_CMXGCR_USBCS_CLK5;  break;
+	case QE_CLK7:  val = QE_CMXGCR_USBCS_CLK7;  break;
+	case QE_CLK9:  val = QE_CMXGCR_USBCS_CLK9;  break;
+	case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+	case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+	case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+	case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+	case QE_BRG9:  val = QE_CMXGCR_USBCS_BRG9;  break;
+	case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+	default:
+		pr_err("%s: requested unknown clock %d\n", __func__, clk);
+		return -EINVAL;
+	}
+
+	if (qe_clock_is_brg(clk))
+		qe_setbrg(clk, rate, 1);
+
+	spin_lock_irqsave(&cmxgcr_lock, flags);
+
+	clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+	spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
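+
+/*
+ * Illustrative call (the 48 MHz rate is hypothetical and board-specific):
+ * route the USB controller's clock from BRG9; the helper programs the BRG
+ * rate itself before switching the mux:
+ *
+ *	err = qe_usb_clock_set(QE_BRG9, 48000000);
+ */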