Linux v4.19.13 snapshot of drivers/nvme/target.
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
new file mode 100644
index 0000000..3c7b61d
--- /dev/null
+++ b/drivers/nvme/target/Kconfig
@@ -0,0 +1,62 @@
+
+config NVME_TARGET
+	tristate "NVMe Target support"
+	depends on BLOCK
+	depends on CONFIGFS_FS
+	help
+	  This enables target side support for the NVMe protocol, that is
+	  it allows the Linux kernel to implement NVMe subsystems and
+	  controllers and export Linux block devices as NVMe namespaces.
+	  You need to select at least one of the transports below to make this
+	  functionality useful.
+
+	  To configure the NVMe target you probably want to use the nvmetcli
+	  tool from http://git.infradead.org/users/hch/nvmetcli.git.
+
+config NVME_TARGET_LOOP
+	tristate "NVMe loopback device support"
+	depends on NVME_TARGET
+	select NVME_CORE
+	select NVME_FABRICS
+	select SG_POOL
+	help
+	  This enables the NVMe loopback device support, which can be useful
+	  to test NVMe host and target side features.
+
+	  If unsure, say N.
+
+config NVME_TARGET_RDMA
+	tristate "NVMe over Fabrics RDMA target support"
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on NVME_TARGET
+	select SGL_ALLOC
+	help
+	  This enables the NVMe RDMA target support, which allows exporting NVMe
+	  devices over RDMA.
+
+	  If unsure, say N.
+
+config NVME_TARGET_FC
+	tristate "NVMe over Fabrics FC target driver"
+	depends on NVME_TARGET
+	depends on HAS_DMA
+	select SGL_ALLOC
+	help
+	  This enables the NVMe FC target support, which allows exporting NVMe
+	  devices over FC.
+
+	  If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+	tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+	depends on NVME_TARGET
+	select NVME_CORE
+	select NVME_FABRICS
+	select SG_POOL
+	depends on NVME_FC
+	depends on NVME_TARGET_FC
+	help
+	  This enables the NVMe FC loopback test support, which can be useful
+	  to test NVMe-FC transport interfaces.
+
+	  If unsure, say N.
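Note: with the options defined above, a minimal .config fragment for local loopback testing might look like the sketch below; the built-in vs. module choice and the rest of the configuration are assumed.

CONFIG_NVME_TARGET=m
CONFIG_NVME_TARGET_LOOP=m
# The fabrics transports are optional and need their own prerequisites
# (INFINIBAND/INFINIBAND_ADDR_TRANS for RDMA, NVME_FC and NVME_TARGET_FC for fcloop):
# CONFIG_NVME_TARGET_RDMA=m
# CONFIG_NVME_TARGET_FC=m
# CONFIG_NVME_TARGET_FCLOOP=m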
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
new file mode 100644
index 0000000..8118c93
--- /dev/null
+++ b/drivers/nvme/target/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NVME_TARGET)		+= nvmet.o
+obj-$(CONFIG_NVME_TARGET_LOOP)		+= nvme-loop.o
+obj-$(CONFIG_NVME_TARGET_RDMA)		+= nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC)		+= nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP)	+= nvme-fcloop.o
+
+nvmet-y		+= core.o configfs.o admin-cmd.o fabrics-cmd.o \
+			discovery.o io-cmd-file.o io-cmd-bdev.o
+nvme-loop-y	+= loop.o
+nvmet-rdma-y	+= rdma.o
+nvmet-fc-y	+= fc.o
+nvme-fcloop-y	+= fcloop.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
new file mode 100644
index 0000000..2008fa6
--- /dev/null
+++ b/drivers/nvme/target/admin-cmd.c
@@ -0,0 +1,840 @@
+/*
+ * NVMe admin command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include <generated/utsrelease.h>
+#include <asm/unaligned.h>
+#include "nvmet.h"
+
+/*
+ * This helper allows us to clear the AEN based on the RAE bit.
+ * Please use this helper when processing the log pages which are
+ * associated with the AEN.
+ */
+static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
+{
+	int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;
+
+	if (!rae)
+		clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
+}
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd)
+{
+	u32 len = le16_to_cpu(cmd->get_log_page.numdu);
+
+	len <<= 16;
+	len += le16_to_cpu(cmd->get_log_page.numdl);
+	/* NUMD is a 0's based value */
+	len += 1;
+	len *= sizeof(u32);
+
+	return len;
+}
+
+static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
+{
+	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+}
+
+static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+		struct nvme_smart_log *slog)
+{
+	struct nvmet_ns *ns;
+	u64 host_reads, host_writes, data_units_read, data_units_written;
+
+	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
+	if (!ns) {
+		pr_err("nvmet : Could not find namespace id : %d\n",
+				le32_to_cpu(req->cmd->get_log_page.nsid));
+		return NVME_SC_INVALID_NS;
+	}
+
+	/* we don't have the right data for file backed ns */
+	if (!ns->bdev)
+		goto out;
+
+	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+	put_unaligned_le64(host_reads, &slog->host_reads[0]);
+	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+	put_unaligned_le64(host_writes, &slog->host_writes[0]);
+	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+out:
+	nvmet_put_namespace(ns);
+
+	return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+		struct nvme_smart_log *slog)
+{
+	u64 host_reads = 0, host_writes = 0;
+	u64 data_units_read = 0, data_units_written = 0;
+	struct nvmet_ns *ns;
+	struct nvmet_ctrl *ctrl;
+
+	ctrl = req->sq->ctrl;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+		/* we don't have the right data for file backed ns */
+		if (!ns->bdev)
+			continue;
+		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+		data_units_read +=
+			part_stat_read(ns->bdev->bd_part, sectors[READ]);
+		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+		data_units_written +=
+			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+	}
+	rcu_read_unlock();
+
+	put_unaligned_le64(host_reads, &slog->host_reads[0]);
+	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+	put_unaligned_le64(host_writes, &slog->host_writes[0]);
+	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+	return NVME_SC_SUCCESS;
+}
+
+static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
+{
+	struct nvme_smart_log *log;
+	u16 status = NVME_SC_INTERNAL;
+
+	if (req->data_len != sizeof(*log))
+		goto out;
+
+	log = kzalloc(sizeof(*log), GFP_KERNEL);
+	if (!log)
+		goto out;
+
+	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
+		status = nvmet_get_smart_log_all(req, log);
+	else
+		status = nvmet_get_smart_log_nsid(req, log);
+	if (status)
+		goto out_free_log;
+
+	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+out_free_log:
+	kfree(log);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+	u16 status = NVME_SC_INTERNAL;
+	struct nvme_effects_log *log;
+
+	log = kzalloc(sizeof(*log), GFP_KERNEL);
+	if (!log)
+		goto out;
+
+	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
+	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);
+
+	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
+	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
+	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
+	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
+	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);
+
+	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+
+	kfree(log);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 status = NVME_SC_INTERNAL;
+	size_t len;
+
+	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+		goto out;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->nr_changed_ns == U32_MAX)
+		len = sizeof(__le32);
+	else
+		len = ctrl->nr_changed_ns * sizeof(__le32);
+	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
+	if (!status)
+		status = nvmet_zero_sgl(req, len, req->data_len - len);
+	ctrl->nr_changed_ns = 0;
+	nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
+	mutex_unlock(&ctrl->lock);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
+		struct nvme_ana_group_desc *desc)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_ns *ns;
+	u32 count = 0;
+
+	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+			if (ns->anagrpid == grpid)
+				desc->nsids[count++] = cpu_to_le32(ns->nsid);
+		rcu_read_unlock();
+	}
+
+	desc->grpid = cpu_to_le32(grpid);
+	desc->nnsids = cpu_to_le32(count);
+	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+	desc->state = req->port->ana_state[grpid];
+	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
+	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+}
+
+static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
+{
+	struct nvme_ana_rsp_hdr hdr = { 0, };
+	struct nvme_ana_group_desc *desc;
+	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
+	size_t len;
+	u32 grpid;
+	u16 ngrps = 0;
+	u16 status;
+
+	status = NVME_SC_INTERNAL;
+	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
+			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+	if (!desc)
+		goto out;
+
+	down_read(&nvmet_ana_sem);
+	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+		if (!nvmet_ana_group_enabled[grpid])
+			continue;
+		len = nvmet_format_ana_group(req, grpid, desc);
+		status = nvmet_copy_to_sgl(req, offset, desc, len);
+		if (status)
+			break;
+		offset += len;
+		ngrps++;
+	}
+	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+		if (nvmet_ana_group_enabled[grpid])
+			ngrps++;
+	}
+
+	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
+	hdr.ngrps = cpu_to_le16(ngrps);
+	nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+	up_read(&nvmet_ana_sem);
+
+	kfree(desc);
+
+	/* copy the header last once we know the number of groups */
+	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvme_id_ctrl *id;
+	u16 status = 0;
+	const char model[] = "Linux";
+
+	id = kzalloc(sizeof(*id), GFP_KERNEL);
+	if (!id) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	/* XXX: figure out how to assign real vendor IDs. */
+	id->vid = 0;
+	id->ssvid = 0;
+
+	memset(id->sn, ' ', sizeof(id->sn));
+	bin2hex(id->sn, &ctrl->subsys->serial,
+		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+	memcpy_and_pad(id->fr, sizeof(id->fr),
+		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+
+	id->rab = 6;
+
+	/*
+	 * XXX: figure out how we can assign an IEEE OUI, but until then
+	 * the safest is to leave it as zeroes.
+	 */
+
+	/* we support multiple ports, multiple hosts and ANA: */
+	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+
+	/* no limit on data transfer sizes for now */
+	id->mdts = 0;
+	id->cntlid = cpu_to_le16(ctrl->cntlid);
+	id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+	/* XXX: figure out what to do about RTD3R/RTD3 */
+	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
+	id->ctratt = cpu_to_le32(1 << 0);
+
+	id->oacs = 0;
+
+	/*
+	 * We don't really have a practical limit on the number of abort
+	 * commands.  But we don't do anything useful for abort either, so
+	 * no point in allowing more abort commands than the spec requires.
+	 */
+	id->acl = 3;
+
+	id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+	/* first slot is read-only, only one slot supported */
+	id->frmw = (1 << 0) | (1 << 1);
+	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
+	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
+	id->npss = 0;
+
+	/* We support keep-alive timeout in granularity of seconds */
+	id->kas = cpu_to_le16(NVMET_KAS);
+
+	id->sqes = (0x6 << 4) | 0x6;
+	id->cqes = (0x4 << 4) | 0x4;
+
+	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
+	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
+			NVME_CTRL_ONCS_WRITE_ZEROES);
+
+	/* XXX: don't report vwc if the underlying device is write through */
+	id->vwc = NVME_CTRL_VWC_PRESENT;
+
+	/*
+	 * We can't support atomic writes bigger than an LBA without support
+	 * from the backend device.
+	 */
+	id->awun = 0;
+	id->awupf = 0;
+
+	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
+	if (ctrl->ops->has_keyed_sgls)
+		id->sgls |= cpu_to_le32(1 << 2);
+	if (req->port->inline_data_size)
+		id->sgls |= cpu_to_le32(1 << 20);
+
+	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+	/* Max command capsule size is sqe + single page of in-capsule data */
+	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+				  req->port->inline_data_size) / 16);
+	/* Max response capsule size is cqe */
+	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+	id->msdbd = ctrl->ops->msdbd;
+
+	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
+	id->anatt = 10; /* random value */
+	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
+	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
+
+	/*
+	 * Meh, we don't really support any power state.  Fake up the same
+	 * values that qemu does.
+	 */
+	id->psd[0].max_power = cpu_to_le16(0x9c4);
+	id->psd[0].entry_lat = cpu_to_le32(0x10);
+	id->psd[0].exit_lat = cpu_to_le32(0x4);
+
+	id->nwpc = 1 << 0; /* write protect and no write protect */
+
+	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+	kfree(id);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ns(struct nvmet_req *req)
+{
+	struct nvmet_ns *ns;
+	struct nvme_id_ns *id;
+	u16 status = 0;
+
+	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		goto out;
+	}
+
+	id = kzalloc(sizeof(*id), GFP_KERNEL);
+	if (!id) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	/* return an all zeroed buffer if we can't find an active namespace */
+	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+	if (!ns)
+		goto done;
+
+	/*
+	 * nuse = ncap = nsze isn't always true, but we have no way to find
+	 * that out from the underlying device.
+	 */
+	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
+	switch (req->port->ana_state[ns->anagrpid]) {
+	case NVME_ANA_INACCESSIBLE:
+	case NVME_ANA_PERSISTENT_LOSS:
+		break;
+	default:
+		id->nuse = id->nsze;
+		break;
+	}
+
+	/*
+	 * We just provide a single LBA format that matches what the
+	 * underlying device reports.
+	 */
+	id->nlbaf = 0;
+	id->flbas = 0;
+
+	/*
+	 * Our namespace might always be shared.  Not just with other
+	 * controllers, but also with any other user of the block device.
+	 */
+	id->nmic = (1 << 0);
+	id->anagrpid = cpu_to_le32(ns->anagrpid);
+
+	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+
+	id->lbaf[0].ds = ns->blksize_shift;
+
+	if (ns->readonly)
+		id->nsattr |= (1 << 0);
+	nvmet_put_namespace(ns);
+done:
+	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+	kfree(id);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+{
+	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_ns *ns;
+	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
+	__le32 *list;
+	u16 status = 0;
+	int i = 0;
+
+	list = kzalloc(buf_size, GFP_KERNEL);
+	if (!list) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+		if (ns->nsid <= min_nsid)
+			continue;
+		list[i++] = cpu_to_le32(ns->nsid);
+		if (i == buf_size / sizeof(__le32))
+			break;
+	}
+	rcu_read_unlock();
+
+	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+
+	kfree(list);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
+				    void *id, off_t *off)
+{
+	struct nvme_ns_id_desc desc = {
+		.nidt = type,
+		.nidl = len,
+	};
+	u16 status;
+
+	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
+	if (status)
+		return status;
+	*off += sizeof(desc);
+
+	status = nvmet_copy_to_sgl(req, *off, id, len);
+	if (status)
+		return status;
+	*off += len;
+
+	return 0;
+}
+
+static void nvmet_execute_identify_desclist(struct nvmet_req *req)
+{
+	struct nvmet_ns *ns;
+	u16 status = 0;
+	off_t off = 0;
+
+	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+	if (!ns) {
+		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
+						  NVME_NIDT_UUID_LEN,
+						  &ns->uuid, &off);
+		if (status)
+			goto out_put_ns;
+	}
+	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
+						  NVME_NIDT_NGUID_LEN,
+						  &ns->nguid, &off);
+		if (status)
+			goto out_put_ns;
+	}
+
+	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
+			off) != NVME_IDENTIFY_DATA_SIZE - off)
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+out_put_ns:
+	nvmet_put_namespace(ns);
+out:
+	nvmet_req_complete(req, status);
+}
+
+/*
+ * A "minimum viable" abort implementation: the command is mandatory in the
+ * spec, but we are not required to do any useful work.  We couldn't really
+ * do a useful abort, so don't bother even with waiting for the command
+ * to be executed and return immediately telling the command to abort
+ * wasn't found.
+ */
+static void nvmet_execute_abort(struct nvmet_req *req)
+{
+	nvmet_set_result(req, 1);
+	nvmet_req_complete(req, 0);
+}
+
+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+	u16 status;
+
+	if (req->ns->file)
+		status = nvmet_file_flush(req);
+	else
+		status = nvmet_bdev_flush(req);
+
+	if (status)
+		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+	return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
+	if (unlikely(!req->ns))
+		return status;
+
+	mutex_lock(&subsys->lock);
+	switch (write_protect) {
+	case NVME_NS_WRITE_PROTECT:
+		req->ns->readonly = true;
+		status = nvmet_write_protect_flush_sync(req);
+		if (status)
+			req->ns->readonly = false;
+		break;
+	case NVME_NS_NO_WRITE_PROTECT:
+		req->ns->readonly = false;
+		status = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (!status)
+		nvmet_ns_changed(subsys, req->ns->nsid);
+	mutex_unlock(&subsys->lock);
+	return status;
+}
+
+static void nvmet_execute_set_features(struct nvmet_req *req)
+{
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+	u32 val32;
+	u16 status = 0;
+
+	switch (cdw10 & 0xff) {
+	case NVME_FEAT_NUM_QUEUES:
+		nvmet_set_result(req,
+			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
+		break;
+	case NVME_FEAT_KATO:
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
+		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+		nvmet_set_result(req, req->sq->ctrl->kato);
+		break;
+	case NVME_FEAT_ASYNC_EVENT:
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
+		if (val32 & ~NVMET_AEN_CFG_ALL) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+
+		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+		nvmet_set_result(req, val32);
+		break;
+	case NVME_FEAT_HOST_ID:
+		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_set_feat_write_protect(req);
+		break;
+	default:
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		break;
+	}
+
+	nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u32 result;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
+	if (!req->ns)
+		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+	mutex_lock(&subsys->lock);
+	if (req->ns->readonly == true)
+		result = NVME_NS_WRITE_PROTECT;
+	else
+		result = NVME_NS_NO_WRITE_PROTECT;
+	nvmet_set_result(req, result);
+	mutex_unlock(&subsys->lock);
+
+	return 0;
+}
+
+static void nvmet_execute_get_features(struct nvmet_req *req)
+{
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+	u16 status = 0;
+
+	switch (cdw10 & 0xff) {
+	/*
+	 * These features are mandatory in the spec, but we don't
+	 * have a useful way to implement them.  We'll eventually
+	 * need to come up with some fake values for these.
+	 */
+#if 0
+	case NVME_FEAT_ARBITRATION:
+		break;
+	case NVME_FEAT_POWER_MGMT:
+		break;
+	case NVME_FEAT_TEMP_THRESH:
+		break;
+	case NVME_FEAT_ERR_RECOVERY:
+		break;
+	case NVME_FEAT_IRQ_COALESCE:
+		break;
+	case NVME_FEAT_IRQ_CONFIG:
+		break;
+	case NVME_FEAT_WRITE_ATOMIC:
+		break;
+#endif
+	case NVME_FEAT_ASYNC_EVENT:
+		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+		break;
+	case NVME_FEAT_VOLATILE_WC:
+		nvmet_set_result(req, 1);
+		break;
+	case NVME_FEAT_NUM_QUEUES:
+		nvmet_set_result(req,
+			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
+		break;
+	case NVME_FEAT_KATO:
+		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+		break;
+	case NVME_FEAT_HOST_ID:
+		/* need 128-bit host identifier flag */
+		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+
+		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
+				sizeof(req->sq->ctrl->hostid));
+		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_get_feat_write_protect(req);
+		break;
+	default:
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		break;
+	}
+
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_async_event(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+		return;
+	}
+	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
+	mutex_unlock(&ctrl->lock);
+
+	schedule_work(&ctrl->async_event_work);
+}
+
+static void nvmet_execute_keep_alive(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
+		ctrl->cntlid, ctrl->kato);
+
+	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+	nvmet_req_complete(req, 0);
+}
+
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+	u16 ret;
+
+	ret = nvmet_check_ctrl_status(req, cmd);
+	if (unlikely(ret))
+		return ret;
+
+	switch (cmd->common.opcode) {
+	case nvme_admin_get_log_page:
+		req->data_len = nvmet_get_log_page_len(cmd);
+
+		switch (cmd->get_log_page.lid) {
+		case NVME_LOG_ERROR:
+			/*
+			 * We currently never set the More bit in the status
+			 * field, so all error log entries are invalid and can
+			 * be zeroed out.  This is called a minimum viable
+			 * implementation (TM) of this mandatory log page.
+			 */
+			req->execute = nvmet_execute_get_log_page_noop;
+			return 0;
+		case NVME_LOG_SMART:
+			req->execute = nvmet_execute_get_log_page_smart;
+			return 0;
+		case NVME_LOG_FW_SLOT:
+			/*
+			 * We only support a single firmware slot which always
+			 * is active, so we can zero out the whole firmware slot
+			 * log and still claim to fully implement this mandatory
+			 * log page.
+			 */
+			req->execute = nvmet_execute_get_log_page_noop;
+			return 0;
+		case NVME_LOG_CHANGED_NS:
+			req->execute = nvmet_execute_get_log_changed_ns;
+			return 0;
+		case NVME_LOG_CMD_EFFECTS:
+			req->execute = nvmet_execute_get_log_cmd_effects_ns;
+			return 0;
+		case NVME_LOG_ANA:
+			req->execute = nvmet_execute_get_log_page_ana;
+			return 0;
+		}
+		break;
+	case nvme_admin_identify:
+		req->data_len = NVME_IDENTIFY_DATA_SIZE;
+		switch (cmd->identify.cns) {
+		case NVME_ID_CNS_NS:
+			req->execute = nvmet_execute_identify_ns;
+			return 0;
+		case NVME_ID_CNS_CTRL:
+			req->execute = nvmet_execute_identify_ctrl;
+			return 0;
+		case NVME_ID_CNS_NS_ACTIVE_LIST:
+			req->execute = nvmet_execute_identify_nslist;
+			return 0;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			req->execute = nvmet_execute_identify_desclist;
+			return 0;
+		}
+		break;
+	case nvme_admin_abort_cmd:
+		req->execute = nvmet_execute_abort;
+		req->data_len = 0;
+		return 0;
+	case nvme_admin_set_features:
+		req->execute = nvmet_execute_set_features;
+		req->data_len = 0;
+		return 0;
+	case nvme_admin_get_features:
+		req->execute = nvmet_execute_get_features;
+		req->data_len = 0;
+		return 0;
+	case nvme_admin_async_event:
+		req->execute = nvmet_execute_async_event;
+		req->data_len = 0;
+		return 0;
+	case nvme_admin_keep_alive:
+		req->execute = nvmet_execute_keep_alive;
+		req->data_len = 0;
+		return 0;
+	}
+
+	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+	       req->sq->qid);
+	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
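Note: nvmet_get_log_page_len() above decodes the Get Log Page transfer length from NUMDU/NUMDL, where NUMD is a 0's based dword count. A standalone C sketch with equivalent arithmetic and a hypothetical worked example (not part of the snapshot):

#include <stdint.h>
#include <stdio.h>

/* Equivalent to nvmet_get_log_page_len(), on already CPU-endian fields. */
static uint32_t log_page_len(uint16_t numdu, uint16_t numdl)
{
	uint32_t numd = ((uint32_t)numdu << 16) + numdl;	/* 0's based dword count */

	return (numd + 1) * sizeof(uint32_t);			/* length in bytes */
}

int main(void)
{
	/* NUMDU=0, NUMDL=0x3ff -> 1024 dwords -> prints 4096 */
	printf("%u\n", (unsigned)log_page_len(0, 0x3ff));
	return 0;
}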
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
new file mode 100644
index 0000000..b37a8e3
--- /dev/null
+++ b/drivers/nvme/target/configfs.c
@@ -0,0 +1,1276 @@
+/*
+ * Configfs interface for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+
+#include "nvmet.h"
+
+static const struct config_item_type nvmet_host_type;
+static const struct config_item_type nvmet_subsys_type;
+
+static const struct nvmet_transport_name {
+	u8		type;
+	const char	*name;
+} nvmet_transport_names[] = {
+	{ NVMF_TRTYPE_RDMA,	"rdma" },
+	{ NVMF_TRTYPE_FC,	"fc" },
+	{ NVMF_TRTYPE_LOOP,	"loop" },
+};
+
+/*
+ * nvmet_port Generic ConfigFS definitions.
+ * Used in any place in the ConfigFS tree that refers to an address.
+ */
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
+		char *page)
+{
+	switch (to_nvmet_port(item)->disc_addr.adrfam) {
+	case NVMF_ADDR_FAMILY_IP4:
+		return sprintf(page, "ipv4\n");
+	case NVMF_ADDR_FAMILY_IP6:
+		return sprintf(page, "ipv6\n");
+	case NVMF_ADDR_FAMILY_IB:
+		return sprintf(page, "ib\n");
+	case NVMF_ADDR_FAMILY_FC:
+		return sprintf(page, "fc\n");
+	default:
+		return sprintf(page, "\n");
+	}
+}
+
+static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+
+	if (sysfs_streq(page, "ipv4")) {
+		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
+	} else if (sysfs_streq(page, "ipv6")) {
+		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
+	} else if (sysfs_streq(page, "ib")) {
+		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+	} else if (sysfs_streq(page, "fc")) {
+		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
+	} else {
+		pr_err("Invalid value '%s' for adrfam\n", page);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_adrfam);
+
+static ssize_t nvmet_addr_portid_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			le16_to_cpu(port->disc_addr.portid));
+}
+
+static ssize_t nvmet_addr_portid_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	u16 portid = 0;
+
+	if (kstrtou16(page, 0, &portid)) {
+		pr_err("Invalid value '%s' for portid\n", page);
+		return -EINVAL;
+	}
+
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+	port->disc_addr.portid = cpu_to_le16(portid);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_portid);
+
+static ssize_t nvmet_addr_traddr_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			port->disc_addr.traddr);
+}
+
+static ssize_t nvmet_addr_traddr_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	if (count > NVMF_TRADDR_SIZE) {
+		pr_err("Invalid value '%s' for traddr\n", page);
+		return -EINVAL;
+	}
+
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+
+	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
+		return -EINVAL;
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_traddr);
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item,
+		char *page)
+{
+	switch (to_nvmet_port(item)->disc_addr.treq) {
+	case NVMF_TREQ_NOT_SPECIFIED:
+		return sprintf(page, "not specified\n");
+	case NVMF_TREQ_REQUIRED:
+		return sprintf(page, "required\n");
+	case NVMF_TREQ_NOT_REQUIRED:
+		return sprintf(page, "not required\n");
+	default:
+		return sprintf(page, "\n");
+	}
+}
+
+static ssize_t nvmet_addr_treq_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+
+	if (sysfs_streq(page, "not specified")) {
+		port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
+	} else if (sysfs_streq(page, "required")) {
+		port->disc_addr.treq = NVMF_TREQ_REQUIRED;
+	} else if (sysfs_streq(page, "not required")) {
+		port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
+	} else {
+		pr_err("Invalid value '%s' for treq\n", page);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_treq);
+
+static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			port->disc_addr.trsvcid);
+}
+
+static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	if (count > NVMF_TRSVCID_SIZE) {
+		pr_err("Invalid value '%s' for trsvcid\n", page);
+		return -EINVAL;
+	}
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+
+	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
+		return -EINVAL;
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+
+static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
+}
+
+static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int ret;
+
+	if (port->enabled) {
+		pr_err("Cannot modify inline_data_size while port enabled\n");
+		pr_err("Disable the port before modifying\n");
+		return -EACCES;
+	}
+	ret = kstrtoint(page, 0, &port->inline_data_size);
+	if (ret) {
+		pr_err("Invalid value '%s' for inline_data_size\n", page);
+		return -EINVAL;
+	}
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+
+static ssize_t nvmet_addr_trtype_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
+		if (port->disc_addr.trtype != nvmet_transport_names[i].type)
+			continue;
+		return sprintf(page, "%s\n", nvmet_transport_names[i].name);
+	}
+
+	return sprintf(page, "\n");
+}
+
+static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
+{
+	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
+	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
+	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
+}
+
+static ssize_t nvmet_addr_trtype_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int i;
+
+	if (port->enabled) {
+		pr_err("Cannot modify address while enabled\n");
+		pr_err("Disable the address before modifying\n");
+		return -EACCES;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
+		if (sysfs_streq(page, nvmet_transport_names[i].name))
+			goto found;
+	}
+
+	pr_err("Invalid value '%s' for trtype\n", page);
+	return -EINVAL;
+found:
+	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+	port->disc_addr.trtype = nvmet_transport_names[i].type;
+	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
+		nvmet_port_init_tsas_rdma(port);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trtype);
+
+/*
+ * Namespace structures & file operation functions below
+ */
+static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
+}
+
+static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
+	int ret;
+
+	mutex_lock(&subsys->lock);
+	ret = -EBUSY;
+	if (ns->enabled)
+		goto out_unlock;
+
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
+
+	kfree(ns->device_path);
+	ret = -ENOMEM;
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
+	if (!ns->device_path)
+		goto out_unlock;
+
+	mutex_unlock(&subsys->lock);
+	return count;
+
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_path);
+
+static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
+}
+
+static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
+					  const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct nvmet_subsys *subsys = ns->subsys;
+	int ret = 0;
+
+
+	mutex_lock(&subsys->lock);
+	if (ns->enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+
+	if (uuid_parse(page, &ns->uuid))
+		ret = -EINVAL;
+
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_uuid);
+
+static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+}
+
+static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct nvmet_subsys *subsys = ns->subsys;
+	u8 nguid[16];
+	const char *p = page;
+	int i;
+	int ret = 0;
+
+	mutex_lock(&subsys->lock);
+	if (ns->enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < 16; i++) {
+		if (p + 2 > page + count) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
+		p += 2;
+
+		if (*p == '-' || *p == ':')
+			p++;
+	}
+
+	memcpy(&ns->nguid, nguid, sizeof(nguid));
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+
+static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
+}
+
+static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	u32 oldgrpid, newgrpid;
+	int ret;
+
+	ret = kstrtou32(page, 0, &newgrpid);
+	if (ret)
+		return ret;
+
+	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
+		return -EINVAL;
+
+	down_write(&nvmet_ana_sem);
+	oldgrpid = ns->anagrpid;
+	nvmet_ana_group_enabled[newgrpid]++;
+	ns->anagrpid = newgrpid;
+	nvmet_ana_group_enabled[oldgrpid]--;
+	nvmet_ana_chgcnt++;
+	up_write(&nvmet_ana_sem);
+
+	nvmet_send_ana_event(ns->subsys, NULL);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
+
+static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
+}
+
+static ssize_t nvmet_ns_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool enable;
+	int ret = 0;
+
+	if (strtobool(page, &enable))
+		return -EINVAL;
+
+	if (enable)
+		ret = nvmet_ns_enable(ns);
+	else
+		nvmet_ns_disable(ns);
+
+	return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, enable);
+
+static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
+}
+
+static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		pr_err("disable ns before setting buffered_io value.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+
+	ns->buffered_io = val;
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+
+static struct configfs_attribute *nvmet_ns_attrs[] = {
+	&nvmet_ns_attr_device_path,
+	&nvmet_ns_attr_device_nguid,
+	&nvmet_ns_attr_device_uuid,
+	&nvmet_ns_attr_ana_grpid,
+	&nvmet_ns_attr_enable,
+	&nvmet_ns_attr_buffered_io,
+	NULL,
+};
+
+static void nvmet_ns_release(struct config_item *item)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+
+	nvmet_ns_free(ns);
+}
+
+static struct configfs_item_operations nvmet_ns_item_ops = {
+	.release		= nvmet_ns_release,
+};
+
+static const struct config_item_type nvmet_ns_type = {
+	.ct_item_ops		= &nvmet_ns_item_ops,
+	.ct_attrs		= nvmet_ns_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *nvmet_ns_make(struct config_group *group,
+		const char *name)
+{
+	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
+	struct nvmet_ns *ns;
+	int ret;
+	u32 nsid;
+
+	ret = kstrtou32(name, 0, &nsid);
+	if (ret)
+		goto out;
+
+	ret = -EINVAL;
+	if (nsid == 0 || nsid == NVME_NSID_ALL)
+		goto out;
+
+	ret = -ENOMEM;
+	ns = nvmet_ns_alloc(subsys, nsid);
+	if (!ns)
+		goto out;
+	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
+
+	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
+
+	return &ns->group;
+out:
+	return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_namespaces_group_ops = {
+	.make_group		= nvmet_ns_make,
+};
+
+static const struct config_item_type nvmet_namespaces_type = {
+	.ct_group_ops		= &nvmet_namespaces_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static int nvmet_port_subsys_allow_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+	struct nvmet_subsys *subsys;
+	struct nvmet_subsys_link *link, *p;
+	int ret;
+
+	if (target->ci_type != &nvmet_subsys_type) {
+		pr_err("can only link subsystems into the subsystems dir!\n");
+		return -EINVAL;
+	}
+	subsys = to_subsys(target);
+	link = kmalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+	link->subsys = subsys;
+
+	down_write(&nvmet_config_sem);
+	ret = -EEXIST;
+	list_for_each_entry(p, &port->subsystems, entry) {
+		if (p->subsys == subsys)
+			goto out_free_link;
+	}
+
+	if (list_empty(&port->subsystems)) {
+		ret = nvmet_enable_port(port);
+		if (ret)
+			goto out_free_link;
+	}
+
+	list_add_tail(&link->entry, &port->subsystems);
+	nvmet_genctr++;
+	up_write(&nvmet_config_sem);
+	return 0;
+
+out_free_link:
+	up_write(&nvmet_config_sem);
+	kfree(link);
+	return ret;
+}
+
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+	struct nvmet_subsys *subsys = to_subsys(target);
+	struct nvmet_subsys_link *p;
+
+	down_write(&nvmet_config_sem);
+	list_for_each_entry(p, &port->subsystems, entry) {
+		if (p->subsys == subsys)
+			goto found;
+	}
+	up_write(&nvmet_config_sem);
+	return;
+
+found:
+	list_del(&p->entry);
+	nvmet_genctr++;
+	if (list_empty(&port->subsystems))
+		nvmet_disable_port(port);
+	up_write(&nvmet_config_sem);
+	kfree(p);
+}
+
+static struct configfs_item_operations nvmet_port_subsys_item_ops = {
+	.allow_link		= nvmet_port_subsys_allow_link,
+	.drop_link		= nvmet_port_subsys_drop_link,
+};
+
+static const struct config_item_type nvmet_port_subsys_type = {
+	.ct_item_ops		= &nvmet_port_subsys_item_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+	struct nvmet_host *host;
+	struct nvmet_host_link *link, *p;
+	int ret;
+
+	if (target->ci_type != &nvmet_host_type) {
+		pr_err("can only link hosts into the allowed_hosts directory!\n");
+		return -EINVAL;
+	}
+
+	host = to_host(target);
+	link = kmalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+	link->host = host;
+
+	down_write(&nvmet_config_sem);
+	ret = -EINVAL;
+	if (subsys->allow_any_host) {
+		pr_err("can't add hosts when allow_any_host is set!\n");
+		goto out_free_link;
+	}
+
+	ret = -EEXIST;
+	list_for_each_entry(p, &subsys->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+			goto out_free_link;
+	}
+	list_add_tail(&link->entry, &subsys->hosts);
+	nvmet_genctr++;
+	up_write(&nvmet_config_sem);
+	return 0;
+out_free_link:
+	up_write(&nvmet_config_sem);
+	kfree(link);
+	return ret;
+}
+
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
+		struct config_item *target)
+{
+	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+	struct nvmet_host *host = to_host(target);
+	struct nvmet_host_link *p;
+
+	down_write(&nvmet_config_sem);
+	list_for_each_entry(p, &subsys->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+			goto found;
+	}
+	up_write(&nvmet_config_sem);
+	return;
+
+found:
+	list_del(&p->entry);
+	nvmet_genctr++;
+	up_write(&nvmet_config_sem);
+	kfree(p);
+}
+
+static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
+	.allow_link		= nvmet_allowed_hosts_allow_link,
+	.drop_link		= nvmet_allowed_hosts_drop_link,
+};
+
+static const struct config_item_type nvmet_allowed_hosts_type = {
+	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%d\n",
+		to_subsys(item)->allow_any_host);
+}
+
+static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	bool allow_any_host;
+	int ret = 0;
+
+	if (strtobool(page, &allow_any_host))
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	if (allow_any_host && !list_empty(&subsys->hosts)) {
+		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	subsys->allow_any_host = allow_any_host;
+out_unlock:
+	up_write(&nvmet_config_sem);
+	return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
+
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
+					      char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	if (NVME_TERTIARY(subsys->ver))
+		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
+				(int)NVME_MAJOR(subsys->ver),
+				(int)NVME_MINOR(subsys->ver),
+				(int)NVME_TERTIARY(subsys->ver));
+	else
+		return snprintf(page, PAGE_SIZE, "%d.%d\n",
+				(int)NVME_MAJOR(subsys->ver),
+				(int)NVME_MINOR(subsys->ver));
+}
+
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
+					       const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	int major, minor, tertiary = 0;
+	int ret;
+
+
+	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
+	if (ret != 2 && ret != 3)
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	subsys->ver = NVME_VS(major, minor, tertiary);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+					     char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+					      const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	down_write(&nvmet_config_sem);
+	sscanf(page, "%llx\n", &subsys->serial);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
+
+static struct configfs_attribute *nvmet_subsys_attrs[] = {
+	&nvmet_subsys_attr_attr_allow_any_host,
+	&nvmet_subsys_attr_attr_version,
+	&nvmet_subsys_attr_attr_serial,
+	NULL,
+};
+
+/*
+ * Subsystem structures & folder operation functions below
+ */
+static void nvmet_subsys_release(struct config_item *item)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	nvmet_subsys_del_ctrls(subsys);
+	nvmet_subsys_put(subsys);
+}
+
+static struct configfs_item_operations nvmet_subsys_item_ops = {
+	.release		= nvmet_subsys_release,
+};
+
+static const struct config_item_type nvmet_subsys_type = {
+	.ct_item_ops		= &nvmet_subsys_item_ops,
+	.ct_attrs		= nvmet_subsys_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *nvmet_subsys_make(struct config_group *group,
+		const char *name)
+{
+	struct nvmet_subsys *subsys;
+
+	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
+		pr_err("can't create discovery subsystem through configfs\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
+
+	config_group_init_type_name(&subsys->namespaces_group,
+			"namespaces", &nvmet_namespaces_type);
+	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
+
+	config_group_init_type_name(&subsys->allowed_hosts_group,
+			"allowed_hosts", &nvmet_allowed_hosts_type);
+	configfs_add_default_group(&subsys->allowed_hosts_group,
+			&subsys->group);
+
+	return &subsys->group;
+}
+
+static struct configfs_group_operations nvmet_subsystems_group_ops = {
+	.make_group		= nvmet_subsys_make,
+};
+
+static const struct config_item_type nvmet_subsystems_type = {
+	.ct_group_ops		= &nvmet_subsystems_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static ssize_t nvmet_referral_enable_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
+}
+
+static ssize_t nvmet_referral_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+	struct nvmet_port *port = to_nvmet_port(item);
+	bool enable;
+
+	if (strtobool(page, &enable))
+		goto inval;
+
+	if (enable)
+		nvmet_referral_enable(parent, port);
+	else
+		nvmet_referral_disable(port);
+
+	return count;
+inval:
+	pr_err("Invalid value '%s' for enable\n", page);
+	return -EINVAL;
+}
+
+CONFIGFS_ATTR(nvmet_referral_, enable);
+
+/*
+ * Discovery Service subsystem definitions
+ */
+static struct configfs_attribute *nvmet_referral_attrs[] = {
+	&nvmet_attr_addr_adrfam,
+	&nvmet_attr_addr_portid,
+	&nvmet_attr_addr_treq,
+	&nvmet_attr_addr_traddr,
+	&nvmet_attr_addr_trsvcid,
+	&nvmet_attr_addr_trtype,
+	&nvmet_referral_attr_enable,
+	NULL,
+};
+
+static void nvmet_referral_release(struct config_item *item)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	nvmet_referral_disable(port);
+	kfree(port);
+}
+
+static struct configfs_item_operations nvmet_referral_item_ops = {
+	.release	= nvmet_referral_release,
+};
+
+static const struct config_item_type nvmet_referral_type = {
+	.ct_owner	= THIS_MODULE,
+	.ct_attrs	= nvmet_referral_attrs,
+	.ct_item_ops	= &nvmet_referral_item_ops,
+};
+
+static struct config_group *nvmet_referral_make(
+		struct config_group *group, const char *name)
+{
+	struct nvmet_port *port;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&port->entry);
+	config_group_init_type_name(&port->group, name, &nvmet_referral_type);
+
+	return &port->group;
+}
+
+static struct configfs_group_operations nvmet_referral_group_ops = {
+	.make_group		= nvmet_referral_make,
+};
+
+static const struct config_item_type nvmet_referrals_type = {
+	.ct_owner	= THIS_MODULE,
+	.ct_group_ops	= &nvmet_referral_group_ops,
+};
+
+static struct {
+	enum nvme_ana_state	state;
+	const char		*name;
+} nvmet_ana_state_names[] = {
+	{ NVME_ANA_OPTIMIZED,		"optimized" },
+	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
+	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
+	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
+	{ NVME_ANA_CHANGE,		"change" },
+};
+
+static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_ana_group *grp = to_ana_group(item);
+	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+		if (state != nvmet_ana_state_names[i].state)
+			continue;
+		return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+	}
+
+	return sprintf(page, "\n");
+}
+
+static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ana_group *grp = to_ana_group(item);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
+		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+			goto found;
+	}
+
+	pr_err("Invalid value '%s' for ana_state\n", page);
+	return -EINVAL;
+
+found:
+	down_write(&nvmet_ana_sem);
+	grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+	nvmet_ana_chgcnt++;
+	up_write(&nvmet_ana_sem);
+
+	nvmet_port_send_ana_event(grp->port);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
+
+static struct configfs_attribute *nvmet_ana_group_attrs[] = {
+	&nvmet_ana_group_attr_ana_state,
+	NULL,
+};
+
+static void nvmet_ana_group_release(struct config_item *item)
+{
+	struct nvmet_ana_group *grp = to_ana_group(item);
+
+	if (grp == &grp->port->ana_default_group)
+		return;
+
+	down_write(&nvmet_ana_sem);
+	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
+	nvmet_ana_group_enabled[grp->grpid]--;
+	up_write(&nvmet_ana_sem);
+
+	nvmet_port_send_ana_event(grp->port);
+	kfree(grp);
+}
+
+static struct configfs_item_operations nvmet_ana_group_item_ops = {
+	.release		= nvmet_ana_group_release,
+};
+
+static const struct config_item_type nvmet_ana_group_type = {
+	.ct_item_ops		= &nvmet_ana_group_item_ops,
+	.ct_attrs		= nvmet_ana_group_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *nvmet_ana_groups_make_group(
+		struct config_group *group, const char *name)
+{
+	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
+	struct nvmet_ana_group *grp;
+	u32 grpid;
+	int ret;
+
+	ret = kstrtou32(name, 0, &grpid);
+	if (ret)
+		goto out;
+
+	ret = -EINVAL;
+	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
+		goto out;
+
+	ret = -ENOMEM;
+	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+	if (!grp)
+		goto out;
+	grp->port = port;
+	grp->grpid = grpid;
+
+	down_write(&nvmet_ana_sem);
+	nvmet_ana_group_enabled[grpid]++;
+	up_write(&nvmet_ana_sem);
+
+	nvmet_port_send_ana_event(grp->port);
+
+	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
+	return &grp->group;
+out:
+	return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_ana_groups_group_ops = {
+	.make_group		= nvmet_ana_groups_make_group,
+};
+
+static const struct config_item_type nvmet_ana_groups_type = {
+	.ct_group_ops		= &nvmet_ana_groups_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Ports definitions.
+ */
+static void nvmet_port_release(struct config_item *item)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	kfree(port->ana_state);
+	kfree(port);
+}
+
+static struct configfs_attribute *nvmet_port_attrs[] = {
+	&nvmet_attr_addr_adrfam,
+	&nvmet_attr_addr_treq,
+	&nvmet_attr_addr_traddr,
+	&nvmet_attr_addr_trsvcid,
+	&nvmet_attr_addr_trtype,
+	&nvmet_attr_param_inline_data_size,
+	NULL,
+};
+
+static struct configfs_item_operations nvmet_port_item_ops = {
+	.release		= nvmet_port_release,
+};
+
+static const struct config_item_type nvmet_port_type = {
+	.ct_attrs		= nvmet_port_attrs,
+	.ct_item_ops		= &nvmet_port_item_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *nvmet_ports_make(struct config_group *group,
+		const char *name)
+{
+	struct nvmet_port *port;
+	u16 portid;
+	u32 i;
+
+	if (kstrtou16(name, 0, &portid))
+		return ERR_PTR(-EINVAL);
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return ERR_PTR(-ENOMEM);
+
+	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
+			sizeof(*port->ana_state), GFP_KERNEL);
+	if (!port->ana_state) {
+		kfree(port);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
+		if (i == NVMET_DEFAULT_ANA_GRPID)
+			port->ana_state[1] = NVME_ANA_OPTIMIZED;
+		else
+			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
+	}
+
+	INIT_LIST_HEAD(&port->entry);
+	INIT_LIST_HEAD(&port->subsystems);
+	INIT_LIST_HEAD(&port->referrals);
+	port->inline_data_size = -1;	/* < 0 == let the transport choose */
+
+	port->disc_addr.portid = cpu_to_le16(portid);
+	config_group_init_type_name(&port->group, name, &nvmet_port_type);
+
+	config_group_init_type_name(&port->subsys_group,
+			"subsystems", &nvmet_port_subsys_type);
+	configfs_add_default_group(&port->subsys_group, &port->group);
+
+	config_group_init_type_name(&port->referrals_group,
+			"referrals", &nvmet_referrals_type);
+	configfs_add_default_group(&port->referrals_group, &port->group);
+
+	config_group_init_type_name(&port->ana_groups_group,
+			"ana_groups", &nvmet_ana_groups_type);
+	configfs_add_default_group(&port->ana_groups_group, &port->group);
+
+	port->ana_default_group.port = port;
+	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
+	config_group_init_type_name(&port->ana_default_group.group,
+			__stringify(NVMET_DEFAULT_ANA_GRPID),
+			&nvmet_ana_group_type);
+	configfs_add_default_group(&port->ana_default_group.group,
+			&port->ana_groups_group);
+
+	return &port->group;
+}
+
+static struct configfs_group_operations nvmet_ports_group_ops = {
+	.make_group		= nvmet_ports_make,
+};
+
+static const struct config_item_type nvmet_ports_type = {
+	.ct_group_ops		= &nvmet_ports_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group nvmet_subsystems_group;
+static struct config_group nvmet_ports_group;
+
+static void nvmet_host_release(struct config_item *item)
+{
+	struct nvmet_host *host = to_host(item);
+
+	kfree(host);
+}
+
+static struct configfs_item_operations nvmet_host_item_ops = {
+	.release		= nvmet_host_release,
+};
+
+static const struct config_item_type nvmet_host_type = {
+	.ct_item_ops		= &nvmet_host_item_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *nvmet_hosts_make_group(struct config_group *group,
+		const char *name)
+{
+	struct nvmet_host *host;
+
+	host = kzalloc(sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return ERR_PTR(-ENOMEM);
+
+	config_group_init_type_name(&host->group, name, &nvmet_host_type);
+
+	return &host->group;
+}
+
+static struct configfs_group_operations nvmet_hosts_group_ops = {
+	.make_group		= nvmet_hosts_make_group,
+};
+
+static const struct config_item_type nvmet_hosts_type = {
+	.ct_group_ops		= &nvmet_hosts_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group nvmet_hosts_group;
+
+static const struct config_item_type nvmet_root_type = {
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct configfs_subsystem nvmet_configfs_subsystem = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf	= "nvmet",
+			.ci_type	= &nvmet_root_type,
+		},
+	},
+};
+
+int __init nvmet_init_configfs(void)
+{
+	int ret;
+
+	config_group_init(&nvmet_configfs_subsystem.su_group);
+	mutex_init(&nvmet_configfs_subsystem.su_mutex);
+
+	config_group_init_type_name(&nvmet_subsystems_group,
+			"subsystems", &nvmet_subsystems_type);
+	configfs_add_default_group(&nvmet_subsystems_group,
+			&nvmet_configfs_subsystem.su_group);
+
+	config_group_init_type_name(&nvmet_ports_group,
+			"ports", &nvmet_ports_type);
+	configfs_add_default_group(&nvmet_ports_group,
+			&nvmet_configfs_subsystem.su_group);
+
+	config_group_init_type_name(&nvmet_hosts_group,
+			"hosts", &nvmet_hosts_type);
+	configfs_add_default_group(&nvmet_hosts_group,
+			&nvmet_configfs_subsystem.su_group);
+
+	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
+	if (ret) {
+		pr_err("configfs_register_subsystem: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void __exit nvmet_exit_configfs(void)
+{
+	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
+}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
new file mode 100644
index 0000000..b5ec96a
--- /dev/null
+++ b/drivers/nvme/target/core.c
@@ -0,0 +1,1242 @@
+/*
+ * Common code for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+
+#include "nvmet.h"
+
+struct workqueue_struct *buffered_io_wq;
+static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static DEFINE_IDA(cntlid_ida);
+
+/*
+ * This read/write semaphore is used to synchronize access to configuration
+ * information on a target system that will result in discovery log page
+ * information change for at least one host.
+ * The full list of resources protected by this semaphore is:
+ *
+ *  - subsystems list
+ *  - per-subsystem allowed hosts list
+ *  - allow_any_host subsystem attribute
+ *  - nvmet_genctr
+ *  - the nvmet_transports array
+ *
+ * When updating any of those lists/structures, the write lock should be
+ * held, while readers (populating the discovery log page or checking a
+ * host-subsystem link) take the read lock to allow concurrent reads.
+ */
+DECLARE_RWSEM(nvmet_config_sem);
+
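+/*
+ * nvmet_ana_sem protects the ANA group bookkeeping below as well as the
+ * per-port ana_state arrays; nvmet_ana_chgcnt is the change count reported
+ * to hosts in the ANA log page header.
+ */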
+u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+u64 nvmet_ana_chgcnt;
+DECLARE_RWSEM(nvmet_ana_sem);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+		const char *subsysnqn);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+		size_t len)
+{
+	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	return 0;
+}
+
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
+{
+	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	return 0;
+}
+
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
+{
+	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	return 0;
+}
+
+static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ns *ns;
+
+	if (list_empty(&subsys->namespaces))
+		return 0;
+
+	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
+	return ns->nsid;
+}
+
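+/*
+ * Layout of completion result dword 0 for an Asynchronous Event Request:
+ * bits 7:0 hold the event type, bits 15:8 the event information and
+ * bits 23:16 the associated log page identifier.
+ */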
+static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
+{
+	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_req *req;
+
+	while (1) {
+		mutex_lock(&ctrl->lock);
+		if (!ctrl->nr_async_event_cmds) {
+			mutex_unlock(&ctrl->lock);
+			return;
+		}
+
+		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+	}
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+		container_of(work, struct nvmet_ctrl, async_event_work);
+	struct nvmet_async_event *aen;
+	struct nvmet_req *req;
+
+	while (1) {
+		mutex_lock(&ctrl->lock);
+		aen = list_first_entry_or_null(&ctrl->async_events,
+				struct nvmet_async_event, entry);
+		if (!aen || !ctrl->nr_async_event_cmds) {
+			mutex_unlock(&ctrl->lock);
+			return;
+		}
+
+		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+		nvmet_set_result(req, nvmet_async_event_result(aen));
+
+		list_del(&aen->entry);
+		kfree(aen);
+
+		mutex_unlock(&ctrl->lock);
+		nvmet_req_complete(req, 0);
+	}
+}
+
+static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+		u8 event_info, u8 log_page)
+{
+	struct nvmet_async_event *aen;
+
+	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
+	if (!aen)
+		return;
+
+	aen->event_type = event_type;
+	aen->event_info = event_info;
+	aen->log_page = log_page;
+
+	mutex_lock(&ctrl->lock);
+	list_add_tail(&aen->entry, &ctrl->async_events);
+	mutex_unlock(&ctrl->lock);
+
+	schedule_work(&ctrl->async_event_work);
+}
+
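+/*
+ * Returns true if this AEN is either not enabled in the host-controlled
+ * Asynchronous Event Configuration or has already been raised and not yet
+ * rearmed; test_and_set_bit() also marks it as outstanding.
+ */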
+static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
+{
+	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
+		return true;
+	return test_and_set_bit(aen, &ctrl->aen_masked);
+}
+
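+/*
+ * Record a namespace ID in the per-controller Changed Namespace List log.
+ * Once more than NVME_MAX_CHANGED_NAMESPACES distinct IDs have changed,
+ * the list collapses to the single sentinel entry 0xffffffff and further
+ * additions are ignored.
+ */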
+static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+	u32 i;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
+		goto out_unlock;
+
+	for (i = 0; i < ctrl->nr_changed_ns; i++) {
+		if (ctrl->changed_ns_list[i] == nsid)
+			goto out_unlock;
+	}
+
+	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
+		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
+		ctrl->nr_changed_ns = U32_MAX;
+		goto out_unlock;
+	}
+
+	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
+out_unlock:
+	mutex_unlock(&ctrl->lock);
+}
+
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+{
+	struct nvmet_ctrl *ctrl;
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
+		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+			continue;
+		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+				NVME_AER_NOTICE_NS_CHANGED,
+				NVME_LOG_CHANGED_NS);
+	}
+}
+
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+		struct nvmet_port *port)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (port && ctrl->port != port)
+			continue;
+		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+			continue;
+		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
+	}
+	mutex_unlock(&subsys->lock);
+}
+
+void nvmet_port_send_ana_event(struct nvmet_port *port)
+{
+	struct nvmet_subsys_link *p;
+
+	down_read(&nvmet_config_sem);
+	list_for_each_entry(p, &port->subsystems, entry)
+		nvmet_send_ana_event(p->subsys, port);
+	up_read(&nvmet_config_sem);
+}
+
+int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
+{
+	int ret = 0;
+
+	down_write(&nvmet_config_sem);
+	if (nvmet_transports[ops->type])
+		ret = -EINVAL;
+	else
+		nvmet_transports[ops->type] = ops;
+	up_write(&nvmet_config_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_register_transport);
+
+void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
+{
+	down_write(&nvmet_config_sem);
+	nvmet_transports[ops->type] = NULL;
+	up_write(&nvmet_config_sem);
+}
+EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+
+int nvmet_enable_port(struct nvmet_port *port)
+{
+	const struct nvmet_fabrics_ops *ops;
+	int ret;
+
+	lockdep_assert_held(&nvmet_config_sem);
+
+	ops = nvmet_transports[port->disc_addr.trtype];
+	if (!ops) {
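+		/*
+		 * Drop the config semaphore around request_module(): the
+		 * transport module's init path calls
+		 * nvmet_register_transport(), which takes the same semaphore
+		 * for writing.
+		 */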
+		up_write(&nvmet_config_sem);
+		request_module("nvmet-transport-%d", port->disc_addr.trtype);
+		down_write(&nvmet_config_sem);
+		ops = nvmet_transports[port->disc_addr.trtype];
+		if (!ops) {
+			pr_err("transport type %d not supported\n",
+				port->disc_addr.trtype);
+			return -EINVAL;
+		}
+	}
+
+	if (!try_module_get(ops->owner))
+		return -EINVAL;
+
+	ret = ops->add_port(port);
+	if (ret) {
+		module_put(ops->owner);
+		return ret;
+	}
+
+	/* If the transport didn't set inline_data_size, then disable it. */
+	if (port->inline_data_size < 0)
+		port->inline_data_size = 0;
+
+	port->enabled = true;
+	return 0;
+}
+
+void nvmet_disable_port(struct nvmet_port *port)
+{
+	const struct nvmet_fabrics_ops *ops;
+
+	lockdep_assert_held(&nvmet_config_sem);
+
+	port->enabled = false;
+
+	ops = nvmet_transports[port->disc_addr.trtype];
+	ops->remove_port(port);
+	module_put(ops->owner);
+}
+
+static void nvmet_keep_alive_timer(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
+			struct nvmet_ctrl, ka_work);
+
+	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
+		ctrl->cntlid, ctrl->kato);
+
+	nvmet_ctrl_fatal_error(ctrl);
+}
+
+static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
+		ctrl->cntlid, ctrl->kato);
+
+	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
+	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
+
+	cancel_delayed_work_sync(&ctrl->ka_work);
+}
+
+static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
+		__le32 nsid)
+{
+	struct nvmet_ns *ns;
+
+	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+		if (ns->nsid == le32_to_cpu(nsid))
+			return ns;
+	}
+
+	return NULL;
+}
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+	struct nvmet_ns *ns;
+
+	rcu_read_lock();
+	ns = __nvmet_find_namespace(ctrl, nsid);
+	if (ns)
+		percpu_ref_get(&ns->ref);
+	rcu_read_unlock();
+
+	return ns;
+}
+
+static void nvmet_destroy_namespace(struct percpu_ref *ref)
+{
+	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
+
+	complete(&ns->disable_done);
+}
+
+void nvmet_put_namespace(struct nvmet_ns *ns)
+{
+	percpu_ref_put(&ns->ref);
+}
+
+static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
+{
+	nvmet_bdev_ns_disable(ns);
+	nvmet_file_ns_disable(ns);
+}
+
+int nvmet_ns_enable(struct nvmet_ns *ns)
+{
+	struct nvmet_subsys *subsys = ns->subsys;
+	int ret;
+
+	mutex_lock(&subsys->lock);
+	ret = -EMFILE;
+	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+		goto out_unlock;
+	ret = 0;
+	if (ns->enabled)
+		goto out_unlock;
+
+	ret = nvmet_bdev_ns_enable(ns);
+	if (ret == -ENOTBLK)
+		ret = nvmet_file_ns_enable(ns);
+	if (ret)
+		goto out_unlock;
+
+	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
+				0, GFP_KERNEL);
+	if (ret)
+		goto out_dev_put;
+
+	if (ns->nsid > subsys->max_nsid)
+		subsys->max_nsid = ns->nsid;
+
+	/*
+	 * The namespaces list needs to be sorted to simplify the implementation
+	 * of the Identify Namespace List subcommand.
+	 */
+	if (list_empty(&subsys->namespaces)) {
+		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
+	} else {
+		struct nvmet_ns *old;
+
+		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+			BUG_ON(ns->nsid == old->nsid);
+			if (ns->nsid < old->nsid)
+				break;
+		}
+
+		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
+	}
+	subsys->nr_namespaces++;
+
+	nvmet_ns_changed(subsys, ns->nsid);
+	ns->enabled = true;
+	ret = 0;
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret;
+out_dev_put:
+	nvmet_ns_dev_disable(ns);
+	goto out_unlock;
+}
+
+void nvmet_ns_disable(struct nvmet_ns *ns)
+{
+	struct nvmet_subsys *subsys = ns->subsys;
+
+	mutex_lock(&subsys->lock);
+	if (!ns->enabled)
+		goto out_unlock;
+
+	ns->enabled = false;
+	list_del_rcu(&ns->dev_link);
+	if (ns->nsid == subsys->max_nsid)
+		subsys->max_nsid = nvmet_max_nsid(subsys);
+	mutex_unlock(&subsys->lock);
+
+	/*
+	 * Now that we removed the namespaces from the lookup list, we
+	 * can kill the per_cpu ref and wait for any remaining references
+	 * to be dropped, as well as an RCU grace period for anyone only
+	 * using the namespace under rcu_read_lock().  Note that we can't
+	 * use call_rcu here as we need to ensure the namespaces have
+	 * been fully destroyed before unloading the module.
+	 */
+	percpu_ref_kill(&ns->ref);
+	synchronize_rcu();
+	wait_for_completion(&ns->disable_done);
+	percpu_ref_exit(&ns->ref);
+
+	mutex_lock(&subsys->lock);
+	subsys->nr_namespaces--;
+	nvmet_ns_changed(subsys, ns->nsid);
+	nvmet_ns_dev_disable(ns);
+out_unlock:
+	mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+	nvmet_ns_disable(ns);
+
+	down_write(&nvmet_ana_sem);
+	nvmet_ana_group_enabled[ns->anagrpid]--;
+	up_write(&nvmet_ana_sem);
+
+	kfree(ns->device_path);
+	kfree(ns);
+}
+
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+{
+	struct nvmet_ns *ns;
+
+	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+	if (!ns)
+		return NULL;
+
+	INIT_LIST_HEAD(&ns->dev_link);
+	init_completion(&ns->disable_done);
+
+	ns->nsid = nsid;
+	ns->subsys = subsys;
+
+	down_write(&nvmet_ana_sem);
+	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
+	nvmet_ana_group_enabled[ns->anagrpid]++;
+	up_write(&nvmet_ana_sem);
+
+	uuid_gen(&ns->uuid);
+	ns->buffered_io = false;
+
+	return ns;
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+	u32 old_sqhd, new_sqhd;
+	u16 sqhd;
+
+	if (status)
+		nvmet_set_status(req, status);
+
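+	/*
+	 * Advance the submission queue head with a lockless cmpxchg loop so
+	 * that concurrent completions each consume exactly one slot, wrapping
+	 * around at the queue size.
+	 */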
+	if (req->sq->size) {
+		do {
+			old_sqhd = req->sq->sqhd;
+			new_sqhd = (old_sqhd + 1) % req->sq->size;
+		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+					old_sqhd);
+	}
+	sqhd = req->sq->sqhd & 0x0000FFFF;
+	req->rsp->sq_head = cpu_to_le16(sqhd);
+	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+	req->rsp->command_id = req->cmd->common.command_id;
+
+	if (req->ns)
+		nvmet_put_namespace(req->ns);
+	req->ops->queue_response(req);
+}
+
+void nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+	__nvmet_req_complete(req, status);
+	percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_complete);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+		u16 qid, u16 size)
+{
+	cq->qid = qid;
+	cq->size = size;
+
+	ctrl->cqs[qid] = cq;
+}
+
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+		u16 qid, u16 size)
+{
+	sq->sqhd = 0;
+	sq->qid = qid;
+	sq->size = size;
+
+	ctrl->sqs[qid] = sq;
+}
+
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
+void nvmet_sq_destroy(struct nvmet_sq *sq)
+{
+	/*
+	 * If this is the admin queue, complete all AERs so that our
+	 * queue doesn't have outstanding requests on it.
+	 */
+	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
+		nvmet_async_events_free(sq->ctrl);
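+	/*
+	 * Two completions guard the teardown: confirm_done fires once the
+	 * percpu ref has been switched to atomic mode and no new references
+	 * can be taken, free_done fires when the last reference is dropped.
+	 */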
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
+	wait_for_completion(&sq->free_done);
+	percpu_ref_exit(&sq->ref);
+
+	if (sq->ctrl) {
+		nvmet_ctrl_put(sq->ctrl);
+		sq->ctrl = NULL; /* allows reusing the queue later */
+	}
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
+
+static void nvmet_sq_free(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->free_done);
+}
+
+int nvmet_sq_init(struct nvmet_sq *sq)
+{
+	int ret;
+
+	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
+	if (ret) {
+		pr_err("percpu_ref init failed!\n");
+		return ret;
+	}
+	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_init);
+
+static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
+		struct nvmet_ns *ns)
+{
+	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
+
+	if (unlikely(state == NVME_ANA_INACCESSIBLE))
+		return NVME_SC_ANA_INACCESSIBLE;
+	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
+		return NVME_SC_ANA_PERSISTENT_LOSS;
+	if (unlikely(state == NVME_ANA_CHANGE))
+		return NVME_SC_ANA_TRANSITION;
+	return 0;
+}
+
+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+	if (unlikely(req->ns->readonly)) {
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_read:
+		case nvme_cmd_flush:
+			break;
+		default:
+			return NVME_SC_NS_WRITE_PROTECTED;
+		}
+	}
+
+	return 0;
+}
+
+static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+	u16 ret;
+
+	ret = nvmet_check_ctrl_status(req, cmd);
+	if (unlikely(ret))
+		return ret;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
+	if (unlikely(!req->ns))
+		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+	ret = nvmet_check_ana_state(req->port, req->ns);
+	if (unlikely(ret))
+		return ret;
+	ret = nvmet_io_cmd_check_access(req);
+	if (unlikely(ret))
+		return ret;
+
+	if (req->ns->file)
+		return nvmet_file_parse_io_cmd(req);
+	else
+		return nvmet_bdev_parse_io_cmd(req);
+}
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
+{
+	u8 flags = req->cmd->common.flags;
+	u16 status;
+
+	req->cq = cq;
+	req->sq = sq;
+	req->ops = ops;
+	req->sg = NULL;
+	req->sg_cnt = 0;
+	req->transfer_len = 0;
+	req->rsp->status = 0;
+	req->ns = NULL;
+
+	/* no support for fused commands yet */
+	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto fail;
+	}
+
+	/*
+	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+	 * contains an address of a single contiguous physical buffer that is
+	 * byte aligned.
+	 */
+	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto fail;
+	}
+
+	if (unlikely(!req->sq->ctrl))
+		/* will return an error for any Non-connect command: */
+		status = nvmet_parse_connect_cmd(req);
+	else if (likely(req->sq->qid != 0))
+		status = nvmet_parse_io_cmd(req);
+	else if (req->cmd->common.opcode == nvme_fabrics_command)
+		status = nvmet_parse_fabrics_cmd(req);
+	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+		status = nvmet_parse_discovery_cmd(req);
+	else
+		status = nvmet_parse_admin_cmd(req);
+
+	if (status)
+		goto fail;
+
+	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto fail;
+	}
+
+	return true;
+
+fail:
+	__nvmet_req_complete(req, status);
+	return false;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_init);
+
+void nvmet_req_uninit(struct nvmet_req *req)
+{
+	percpu_ref_put(&req->sq->ref);
+	if (req->ns)
+		nvmet_put_namespace(req->ns);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+
+void nvmet_req_execute(struct nvmet_req *req)
+{
+	if (unlikely(req->data_len != req->transfer_len))
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+	else
+		req->execute(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_execute);
+
+static inline bool nvmet_cc_en(u32 cc)
+{
+	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+}
+
+static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+{
+	lockdep_assert_held(&ctrl->lock);
+
+	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
+	    nvmet_cc_mps(ctrl->cc) != 0 ||
+	    nvmet_cc_ams(ctrl->cc) != 0 ||
+	    nvmet_cc_css(ctrl->cc) != 0) {
+		ctrl->csts = NVME_CSTS_CFS;
+		return;
+	}
+
+	ctrl->csts = NVME_CSTS_RDY;
+
+	/*
+	 * Controllers that are not yet enabled should not really enforce the
+	 * keep alive timeout, but we still want to track a timeout and cleanup
+	 * in case a host died before it enabled the controller.  Hence, simply
+	 * reset the keep alive timer when the controller is enabled.
+	 */
+	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+{
+	lockdep_assert_held(&ctrl->lock);
+
+	/* XXX: tear down queues? */
+	ctrl->csts &= ~NVME_CSTS_RDY;
+	ctrl->cc = 0;
+}
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
+{
+	u32 old;
+
+	mutex_lock(&ctrl->lock);
+	old = ctrl->cc;
+	ctrl->cc = new;
+
+	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
+		nvmet_start_ctrl(ctrl);
+	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
+		nvmet_clear_ctrl(ctrl);
+	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
+		nvmet_clear_ctrl(ctrl);
+		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+	}
+	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
+		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
+{
+	/* command sets supported: NVMe command set: */
+	ctrl->cap = (1ULL << 37);
+	/* CC.EN timeout in 500msec units: */
+	ctrl->cap |= (15ULL << 24);
+	/* maximum queue entries supported: */
+	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+}
+
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+		struct nvmet_req *req, struct nvmet_ctrl **ret)
+{
+	struct nvmet_subsys *subsys;
+	struct nvmet_ctrl *ctrl;
+	u16 status = 0;
+
+	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+	if (!subsys) {
+		pr_warn("connect request for invalid subsystem %s!\n",
+			subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+	}
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->cntlid == cntlid) {
+			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
+				pr_warn("hostnqn mismatch.\n");
+				continue;
+			}
+			if (!kref_get_unless_zero(&ctrl->ref))
+				continue;
+
+			*ret = ctrl;
+			goto out;
+		}
+	}
+
+	pr_warn("could not find controller %d for subsys %s / host %s\n",
+		cntlid, subsysnqn, hostnqn);
+	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+
+out:
+	mutex_unlock(&subsys->lock);
+	nvmet_subsys_put(subsys);
+	return status;
+}
+
+u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
+{
+	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
+		       cmd->common.opcode, req->sq->qid);
+		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+	}
+
+	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
+		       cmd->common.opcode, req->sq->qid);
+		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+	}
+	return 0;
+}
+
+static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
+		const char *hostnqn)
+{
+	struct nvmet_host_link *p;
+
+	if (subsys->allow_any_host)
+		return true;
+
+	list_for_each_entry(p, &subsys->hosts, entry) {
+		if (!strcmp(nvmet_host_name(p->host), hostnqn))
+			return true;
+	}
+
+	return false;
+}
+
+static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
+		const char *hostnqn)
+{
+	struct nvmet_subsys_link *s;
+
+	list_for_each_entry(s, &req->port->subsystems, entry) {
+		if (__nvmet_host_allowed(s->subsys, hostnqn))
+			return true;
+	}
+
+	return false;
+}
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+		const char *hostnqn)
+{
+	lockdep_assert_held(&nvmet_config_sem);
+
+	if (subsys->type == NVME_NQN_DISC)
+		return nvmet_host_discovery_allowed(req, hostnqn);
+	else
+		return __nvmet_host_allowed(subsys, hostnqn);
+}
+
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+{
+	struct nvmet_subsys *subsys;
+	struct nvmet_ctrl *ctrl;
+	int ret;
+	u16 status;
+
+	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+	if (!subsys) {
+		pr_warn("connect request for invalid subsystem %s!\n",
+			subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		goto out;
+	}
+
+	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+	down_read(&nvmet_config_sem);
+	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+		pr_info("connect by host %s for subsystem %s not allowed\n",
+			hostnqn, subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+		up_read(&nvmet_config_sem);
+		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+		goto out_put_subsystem;
+	}
+	up_read(&nvmet_config_sem);
+
+	status = NVME_SC_INTERNAL;
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		goto out_put_subsystem;
+	mutex_init(&ctrl->lock);
+
+	nvmet_init_cap(ctrl);
+
+	ctrl->port = req->port;
+
+	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
+	INIT_LIST_HEAD(&ctrl->async_events);
+
+	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
+	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+
+	kref_init(&ctrl->ref);
+	ctrl->subsys = subsys;
+	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
+
+	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
+			sizeof(__le32), GFP_KERNEL);
+	if (!ctrl->changed_ns_list)
+		goto out_free_ctrl;
+
+	ctrl->cqs = kcalloc(subsys->max_qid + 1,
+			sizeof(struct nvmet_cq *),
+			GFP_KERNEL);
+	if (!ctrl->cqs)
+		goto out_free_changed_ns_list;
+
+	ctrl->sqs = kcalloc(subsys->max_qid + 1,
+			sizeof(struct nvmet_sq *),
+			GFP_KERNEL);
+	if (!ctrl->sqs)
+		goto out_free_cqs;
+
+	ret = ida_simple_get(&cntlid_ida,
+			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+			     GFP_KERNEL);
+	if (ret < 0) {
+		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+		goto out_free_sqs;
+	}
+	ctrl->cntlid = ret;
+
+	ctrl->ops = req->ops;
+	if (ctrl->subsys->type == NVME_NQN_DISC) {
+		/* Don't accept keep-alive timeout for discovery controllers */
+		if (kato) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			goto out_remove_ida;
+		}
+
+		/*
+		 * Discovery controllers use some arbitrary high value in order
+		 * to clean up stale discovery sessions
+		 *
+		 * From the latest base diff RC:
+		 * "The Keep Alive command is not supported by
+		 * Discovery controllers. A transport may specify a
+		 * fixed Discovery controller activity timeout value
+		 * (e.g., 2 minutes).  If no commands are received
+		 * by a Discovery controller within that time
+		 * period, the controller may perform the
+		 * actions for Keep Alive Timer expiration".
+		 */
+		ctrl->kato = NVMET_DISC_KATO;
+	} else {
+		/* keep-alive timeout in seconds */
+		ctrl->kato = DIV_ROUND_UP(kato, 1000);
+	}
+	nvmet_start_keep_alive_timer(ctrl);
+
+	mutex_lock(&subsys->lock);
+	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+	mutex_unlock(&subsys->lock);
+
+	*ctrlp = ctrl;
+	return 0;
+
+out_remove_ida:
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+out_free_sqs:
+	kfree(ctrl->sqs);
+out_free_cqs:
+	kfree(ctrl->cqs);
+out_free_changed_ns_list:
+	kfree(ctrl->changed_ns_list);
+out_free_ctrl:
+	kfree(ctrl);
+out_put_subsystem:
+	nvmet_subsys_put(subsys);
+out:
+	return status;
+}
+
+static void nvmet_ctrl_free(struct kref *ref)
+{
+	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
+	struct nvmet_subsys *subsys = ctrl->subsys;
+
+	mutex_lock(&subsys->lock);
+	list_del(&ctrl->subsys_entry);
+	mutex_unlock(&subsys->lock);
+
+	nvmet_stop_keep_alive_timer(ctrl);
+
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+
+	kfree(ctrl->sqs);
+	kfree(ctrl->cqs);
+	kfree(ctrl->changed_ns_list);
+	kfree(ctrl);
+
+	nvmet_subsys_put(subsys);
+}
+
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
+{
+	kref_put(&ctrl->ref, nvmet_ctrl_free);
+}
+
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+			container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+	ctrl->ops->delete_ctrl(ctrl);
+}
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
+{
+	mutex_lock(&ctrl->lock);
+	if (!(ctrl->csts & NVME_CSTS_CFS)) {
+		ctrl->csts |= NVME_CSTS_CFS;
+		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+		schedule_work(&ctrl->fatal_err_work);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+		const char *subsysnqn)
+{
+	struct nvmet_subsys_link *p;
+
+	if (!port)
+		return NULL;
+
+	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
+			NVMF_NQN_SIZE)) {
+		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
+			return NULL;
+		return nvmet_disc_subsys;
+	}
+
+	down_read(&nvmet_config_sem);
+	list_for_each_entry(p, &port->subsystems, entry) {
+		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
+				NVMF_NQN_SIZE)) {
+			if (!kref_get_unless_zero(&p->subsys->ref))
+				break;
+			up_read(&nvmet_config_sem);
+			return p->subsys;
+		}
+	}
+	up_read(&nvmet_config_sem);
+	return NULL;
+}
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+		enum nvme_subsys_type type)
+{
+	struct nvmet_subsys *subsys;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return NULL;
+
+	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	/* generate a random serial number as our controllers are ephemeral: */
+	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
+
+	switch (type) {
+	case NVME_NQN_NVME:
+		subsys->max_qid = NVMET_NR_QUEUES;
+		break;
+	case NVME_NQN_DISC:
+		subsys->max_qid = 0;
+		break;
+	default:
+		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
+		kfree(subsys);
+		return NULL;
+	}
+	subsys->type = type;
+	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
+			GFP_KERNEL);
+	if (!subsys->subsysnqn) {
+		kfree(subsys);
+		return NULL;
+	}
+
+	kref_init(&subsys->ref);
+
+	mutex_init(&subsys->lock);
+	INIT_LIST_HEAD(&subsys->namespaces);
+	INIT_LIST_HEAD(&subsys->ctrls);
+	INIT_LIST_HEAD(&subsys->hosts);
+
+	return subsys;
+}
+
+static void nvmet_subsys_free(struct kref *ref)
+{
+	struct nvmet_subsys *subsys =
+		container_of(ref, struct nvmet_subsys, ref);
+
+	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+
+	kfree(subsys->subsysnqn);
+	kfree(subsys);
+}
+
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		ctrl->ops->delete_ctrl(ctrl);
+	mutex_unlock(&subsys->lock);
+}
+
+void nvmet_subsys_put(struct nvmet_subsys *subsys)
+{
+	kref_put(&subsys->ref, nvmet_subsys_free);
+}
+
+static int __init nvmet_init(void)
+{
+	int error;
+
+	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+
+	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+			WQ_MEM_RECLAIM, 0);
+	if (!buffered_io_wq) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	error = nvmet_init_discovery();
+	if (error)
+		goto out_free_work_queue;
+
+	error = nvmet_init_configfs();
+	if (error)
+		goto out_exit_discovery;
+	return 0;
+
+out_exit_discovery:
+	nvmet_exit_discovery();
+out_free_work_queue:
+	destroy_workqueue(buffered_io_wq);
+out:
+	return error;
+}
+
+static void __exit nvmet_exit(void)
+{
+	nvmet_exit_configfs();
+	nvmet_exit_discovery();
+	ida_destroy(&cntlid_ida);
+	destroy_workqueue(buffered_io_wq);
+
+	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+}
+
+module_init(nvmet_init);
+module_exit(nvmet_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
new file mode 100644
index 0000000..eae29f4
--- /dev/null
+++ b/drivers/nvme/target/discovery.c
@@ -0,0 +1,242 @@
+/*
+ * Discovery service for the NVMe over Fabrics target.
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/slab.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+struct nvmet_subsys *nvmet_disc_subsys;
+
+u64 nvmet_genctr;
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
+{
+	down_write(&nvmet_config_sem);
+	if (list_empty(&port->entry)) {
+		list_add_tail(&port->entry, &parent->referrals);
+		port->enabled = true;
+		nvmet_genctr++;
+	}
+	up_write(&nvmet_config_sem);
+}
+
+void nvmet_referral_disable(struct nvmet_port *port)
+{
+	down_write(&nvmet_config_sem);
+	if (!list_empty(&port->entry)) {
+		port->enabled = false;
+		list_del_init(&port->entry);
+		nvmet_genctr++;
+	}
+	up_write(&nvmet_config_sem);
+}
+
+static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
+		struct nvmet_port *port, char *subsys_nqn, char *traddr,
+		u8 type, u32 numrec)
+{
+	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
+
+	e->trtype = port->disc_addr.trtype;
+	e->adrfam = port->disc_addr.adrfam;
+	e->treq = port->disc_addr.treq;
+	e->portid = port->disc_addr.portid;
+	/* we support only dynamic controllers */
+	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
+	e->subtype = type;
+	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
+	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
+	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
+	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+}
+
+/*
+ * nvmet_set_disc_traddr - set a correct discovery log entry traddr
+ *
+ * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
+ * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
+ * must not contain that "any" IP address. If the transport implements
+ * .disc_traddr, use it: this callback sets the discovery traddr from the
+ * req->port address in case the port in question listens on an "any" IP
+ * address.
+ */
+static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
+		char *traddr)
+{
+	if (req->ops->disc_traddr)
+		req->ops->disc_traddr(req, port, traddr);
+	else
+		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+}
+
+static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+{
+	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_disc_rsp_page_hdr *hdr;
+	size_t data_len = nvmet_get_log_page_len(req->cmd);
+	size_t alloc_len = max(data_len, sizeof(*hdr));
+	int residual_len = data_len - sizeof(*hdr);
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	u32 numrec = 0;
+	u16 status = 0;
+
+	/*
+	 * Make sure we're passing at least a buffer of response header size.
+	 * If the host-provided data length is less than the header size, only
+	 * the number of bytes requested by the host will be sent back.
+	 */
+	hdr = kzalloc(alloc_len, GFP_KERNEL);
+	if (!hdr) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
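+	/*
+	 * numrec counts every eligible entry even when it no longer fits in
+	 * the residual buffer space, so a host that passed a short buffer can
+	 * read the header, allocate a larger buffer and retry.
+	 */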
+	down_read(&nvmet_config_sem);
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
+			continue;
+		if (residual_len >= entry_size) {
+			char traddr[NVMF_TRADDR_SIZE];
+
+			nvmet_set_disc_traddr(req, req->port, traddr);
+			nvmet_format_discovery_entry(hdr, req->port,
+					p->subsys->subsysnqn, traddr,
+					NVME_NQN_NVME, numrec);
+			residual_len -= entry_size;
+		}
+		numrec++;
+	}
+
+	list_for_each_entry(r, &req->port->referrals, entry) {
+		if (residual_len >= entry_size) {
+			nvmet_format_discovery_entry(hdr, r,
+					NVME_DISC_SUBSYS_NAME,
+					r->disc_addr.traddr,
+					NVME_NQN_DISC, numrec);
+			residual_len -= entry_size;
+		}
+		numrec++;
+	}
+
+	hdr->genctr = cpu_to_le64(nvmet_genctr);
+	hdr->numrec = cpu_to_le64(numrec);
+	hdr->recfmt = cpu_to_le16(0);
+
+	up_read(&nvmet_config_sem);
+
+	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
+	kfree(hdr);
+out:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvme_id_ctrl *id;
+	u16 status = 0;
+
+	id = kzalloc(sizeof(*id), GFP_KERNEL);
+	if (!id) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	memset(id->fr, ' ', sizeof(id->fr));
+	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+
+	/* no limit on data transfer sizes for now */
+	id->mdts = 0;
+	id->cntlid = cpu_to_le16(ctrl->cntlid);
+	id->ver = cpu_to_le32(ctrl->subsys->ver);
+	id->lpa = (1 << 2);
+
+	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
+	if (ctrl->ops->has_keyed_sgls)
+		id->sgls |= cpu_to_le32(1 << 2);
+	if (req->port->inline_data_size)
+		id->sgls |= cpu_to_le32(1 << 20);
+
+	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+	kfree(id);
+out:
+	nvmet_req_complete(req, status);
+}
+
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+
+	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+		pr_err("got cmd %d while not ready\n",
+		       cmd->common.opcode);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+
+	switch (cmd->common.opcode) {
+	case nvme_admin_get_log_page:
+		req->data_len = nvmet_get_log_page_len(cmd);
+
+		switch (cmd->get_log_page.lid) {
+		case NVME_LOG_DISC:
+			req->execute = nvmet_execute_get_disc_log_page;
+			return 0;
+		default:
+			pr_err("unsupported get_log_page lid %d\n",
+			       cmd->get_log_page.lid);
+			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		}
+	case nvme_admin_identify:
+		req->data_len = NVME_IDENTIFY_DATA_SIZE;
+		switch (cmd->identify.cns) {
+		case NVME_ID_CNS_CTRL:
+			req->execute =
+				nvmet_execute_identify_disc_ctrl;
+			return 0;
+		default:
+			pr_err("unsupported identify cns %d\n",
+			       cmd->identify.cns);
+			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		}
+	default:
+		pr_err("unsupported cmd %d\n", cmd->common.opcode);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+
+	pr_err("unhandled cmd %d\n", cmd->common.opcode);
+	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
+int __init nvmet_init_discovery(void)
+{
+	nvmet_disc_subsys =
+		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+	if (!nvmet_disc_subsys)
+		return -ENOMEM;
+	return 0;
+}
+
+void nvmet_exit_discovery(void)
+{
+	nvmet_subsys_put(nvmet_disc_subsys);
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
new file mode 100644
index 0000000..d84ae00
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -0,0 +1,260 @@
+/*
+ * NVMe Fabrics command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+static void nvmet_execute_prop_set(struct nvmet_req *req)
+{
+	u16 status = 0;
+
+	if (!(req->cmd->prop_set.attrib & 1)) {
+		u64 val = le64_to_cpu(req->cmd->prop_set.value);
+
+		switch (le32_to_cpu(req->cmd->prop_set.offset)) {
+		case NVME_REG_CC:
+			nvmet_update_cc(req->sq->ctrl, val);
+			break;
+		default:
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+	} else {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+	}
+
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_prop_get(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 status = 0;
+	u64 val = 0;
+
+	if (req->cmd->prop_get.attrib & 1) {
+		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+		case NVME_REG_CAP:
+			val = ctrl->cap;
+			break;
+		default:
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+	} else {
+		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+		case NVME_REG_VS:
+			val = ctrl->subsys->ver;
+			break;
+		case NVME_REG_CC:
+			val = ctrl->cc;
+			break;
+		case NVME_REG_CSTS:
+			val = ctrl->csts;
+			break;
+		default:
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+	}
+
+	req->rsp->result.u64 = cpu_to_le64(val);
+	nvmet_req_complete(req, status);
+}
+
+u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+
+	switch (cmd->fabrics.fctype) {
+	case nvme_fabrics_type_property_set:
+		req->data_len = 0;
+		req->execute = nvmet_execute_prop_set;
+		break;
+	case nvme_fabrics_type_property_get:
+		req->data_len = 0;
+		req->execute = nvmet_execute_prop_get;
+		break;
+	default:
+		pr_err("received unknown capsule type 0x%x\n",
+			cmd->fabrics.fctype);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+
+	return 0;
+}
+
+static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+	struct nvmf_connect_command *c = &req->cmd->connect;
+	u16 qid = le16_to_cpu(c->qid);
+	u16 sqsize = le16_to_cpu(c->sqsize);
+	struct nvmet_ctrl *old;
+
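+	/* atomically claim the submission queue for this controller */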
+	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+	if (old) {
+		pr_warn("queue already connected!\n");
+		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+	}
+	if (!sqsize) {
+		pr_warn("queue size zero!\n");
+		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+	}
+
+	/* note: convert queue size from 0's-based value to 1's-based value */
+	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
+	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
+	return 0;
+}
+
+static void nvmet_execute_admin_connect(struct nvmet_req *req)
+{
+	struct nvmf_connect_command *c = &req->cmd->connect;
+	struct nvmf_connect_data *d;
+	struct nvmet_ctrl *ctrl = NULL;
+	u16 status = 0;
+
+	d = kmalloc(sizeof(*d), GFP_KERNEL);
+	if (!d) {
+		status = NVME_SC_INTERNAL;
+		goto complete;
+	}
+
+	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+	if (status)
+		goto out;
+
+	/* zero out initial completion result, assign values as needed */
+	req->rsp->result.u32 = 0;
+
+	if (c->recfmt != 0) {
+		pr_warn("invalid connect version (%d).\n",
+			le16_to_cpu(c->recfmt));
+		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
+		pr_warn("connect attempt for invalid controller ID %#x\n",
+			le16_to_cpu(d->cntlid));
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+		goto out;
+	}
+
+	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+				  le32_to_cpu(c->kato), &ctrl);
+	if (status)
+		goto out;
+	uuid_copy(&ctrl->hostid, &d->hostid);
+
+	status = nvmet_install_queue(ctrl, req);
+	if (status) {
+		nvmet_ctrl_put(ctrl);
+		goto out;
+	}
+
+	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
+		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
+	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+
+out:
+	kfree(d);
+complete:
+	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_io_connect(struct nvmet_req *req)
+{
+	struct nvmf_connect_command *c = &req->cmd->connect;
+	struct nvmf_connect_data *d;
+	struct nvmet_ctrl *ctrl = NULL;
+	u16 qid = le16_to_cpu(c->qid);
+	u16 status = 0;
+
+	d = kmalloc(sizeof(*d), GFP_KERNEL);
+	if (!d) {
+		status = NVME_SC_INTERNAL;
+		goto complete;
+	}
+
+	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+	if (status)
+		goto out;
+
+	/* zero out initial completion result, assign values as needed */
+	req->rsp->result.u32 = 0;
+
+	if (c->recfmt != 0) {
+		pr_warn("invalid connect version (%d).\n",
+			le16_to_cpu(c->recfmt));
+		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		goto out;
+	}
+
+	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+				     le16_to_cpu(d->cntlid),
+				     req, &ctrl);
+	if (status)
+		goto out;
+
+	if (unlikely(qid > ctrl->subsys->max_qid)) {
+		pr_warn("invalid queue id (%d)\n", qid);
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+		goto out_ctrl_put;
+	}
+
+	status = nvmet_install_queue(ctrl, req);
+	if (status) {
+		/* pass back the cntlid of the controller that failed queue installation */
+		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+		goto out_ctrl_put;
+	}
+
+	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+
+out:
+	kfree(d);
+complete:
+	nvmet_req_complete(req, status);
+	return;
+
+out_ctrl_put:
+	nvmet_ctrl_put(ctrl);
+	goto out;
+}
+
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+
+	if (cmd->common.opcode != nvme_fabrics_command) {
+		pr_err("invalid command 0x%x on unconnected queue.\n",
+			cmd->fabrics.opcode);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+			cmd->fabrics.fctype);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+
+	req->data_len = sizeof(struct nvmf_connect_data);
+	if (cmd->connect.qid == 0)
+		req->execute = nvmet_execute_admin_connect;
+	else
+		req->execute = nvmet_execute_io_connect;
+	return 0;
+}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
new file mode 100644
index 0000000..29b4b23
--- /dev/null
+++ b/drivers/nvme/target/fc.c
@@ -0,0 +1,2576 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+#define NVMET_LS_CTX_COUNT		256
+
+/* for this implementation, assume small single frame rqst/rsp */
+#define NVME_FC_MAX_LS_BUFFER_SIZE		2048
+
+struct nvmet_fc_tgtport;
+struct nvmet_fc_tgt_assoc;
+
+struct nvmet_fc_ls_iod {
+	struct nvmefc_tgt_ls_req	*lsreq;
+	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
+
+	struct list_head		ls_list;	/* tgtport->ls_list */
+
+	struct nvmet_fc_tgtport		*tgtport;
+	struct nvmet_fc_tgt_assoc	*assoc;
+
+	u8				*rqstbuf;
+	u8				*rspbuf;
+	u16				rqstdatalen;
+	dma_addr_t			rspdma;
+
+	struct scatterlist		sg[2];
+
+	struct work_struct		work;
+} __aligned(sizeof(unsigned long long));
+
+/* desired maximum for a single sequence - if sg list allows it */
+#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
+
+enum nvmet_fcp_datadir {
+	NVMET_FCP_NODATA,
+	NVMET_FCP_WRITE,
+	NVMET_FCP_READ,
+	NVMET_FCP_ABORTED,
+};
+
+struct nvmet_fc_fcp_iod {
+	struct nvmefc_tgt_fcp_req	*fcpreq;
+
+	struct nvme_fc_cmd_iu		cmdiubuf;
+	struct nvme_fc_ersp_iu		rspiubuf;
+	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
+	struct scatterlist		*data_sg;
+	int				data_sg_cnt;
+	u32				offset;
+	enum nvmet_fcp_datadir		io_dir;
+	bool				active;
+	bool				abort;
+	bool				aborted;
+	bool				writedataactive;
+	spinlock_t			flock;
+
+	struct nvmet_req		req;
+	struct work_struct		work;
+	struct work_struct		done_work;
+	struct work_struct		defer_work;
+
+	struct nvmet_fc_tgtport		*tgtport;
+	struct nvmet_fc_tgt_queue	*queue;
+
+	struct list_head		fcp_list;	/* tgtport->fcp_list */
+};
+
+struct nvmet_fc_tgtport {
+
+	struct nvmet_fc_target_port	fc_target_port;
+
+	struct list_head		tgt_list; /* nvmet_fc_target_list */
+	struct device			*dev;	/* dev for dma mapping */
+	struct nvmet_fc_target_template	*ops;
+
+	struct nvmet_fc_ls_iod		*iod;
+	spinlock_t			lock;
+	struct list_head		ls_list;
+	struct list_head		ls_busylist;
+	struct list_head		assoc_list;
+	struct ida			assoc_cnt;
+	struct nvmet_port		*port;
+	struct kref			ref;
+	u32				max_sg_cnt;
+};
+
+struct nvmet_fc_defer_fcp_req {
+	struct list_head		req_list;
+	struct nvmefc_tgt_fcp_req	*fcp_req;
+};
+
+struct nvmet_fc_tgt_queue {
+	bool				ninetypercent;
+	u16				qid;
+	u16				sqsize;
+	u16				ersp_ratio;
+	__le16				sqhd;
+	int				cpu;
+	atomic_t			connected;
+	atomic_t			sqtail;
+	atomic_t			zrspcnt;
+	atomic_t			rsn;
+	spinlock_t			qlock;
+	struct nvmet_port		*port;
+	struct nvmet_cq			nvme_cq;
+	struct nvmet_sq			nvme_sq;
+	struct nvmet_fc_tgt_assoc	*assoc;
+	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
+	struct list_head		fod_list;
+	struct list_head		pending_cmd_list;
+	struct list_head		avail_defer_list;
+	struct workqueue_struct		*work_q;
+	struct kref			ref;
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_tgt_assoc {
+	u64				association_id;
+	u32				a_id;
+	struct nvmet_fc_tgtport		*tgtport;
+	struct list_head		a_list;
+	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
+	struct kref			ref;
+	struct work_struct		del_work;
+};
+
+
+static inline int
+nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+{
+	return (iodptr - iodptr->tgtport->iod);
+}
+
+static inline int
+nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+{
+	return (fodptr - fodptr->queue->fod);
+}
+
+
+/*
+ * Association and Connection IDs:
+ *
+ * Association ID will have random number in upper 6 bytes and zero
+ *   in lower 2 bytes
+ *
+ * Connection IDs will be Association ID with QID or'd in lower 2 bytes
+ *
+ * note: Association ID = Connection ID for queue 0
+ */
+#define BYTES_FOR_QID			sizeof(u16)
+#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
+#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
+
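+/*
+ * For example, an association ID of 0x0123456789ab0000 or'd with qid 2
+ * yields connection ID 0x0123456789ab0002; masking with
+ * ~NVMET_FC_QUEUEID_MASK recovers the association ID and the low 16 bits
+ * recover the qid.
+ */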
+static inline u64
+nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
+{
+	return (assoc->association_id | qid);
+}
+
+static inline u64
+nvmet_fc_getassociationid(u64 connectionid)
+{
+	return connectionid & ~NVMET_FC_QUEUEID_MASK;
+}
+
+static inline u16
+nvmet_fc_getqueueid(u64 connectionid)
+{
+	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
+}
+
+static inline struct nvmet_fc_tgtport *
+targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
+{
+	return container_of(targetport, struct nvmet_fc_tgtport,
+				 fc_target_port);
+}
+
+static inline struct nvmet_fc_fcp_iod *
+nvmet_req_to_fod(struct nvmet_req *nvme_req)
+{
+	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
+}
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
+
+static LIST_HEAD(nvmet_fc_target_list);
+static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+
+
+static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
+static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
+static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
+static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+					struct nvmet_fc_fcp_iod *fod);
+static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such, wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+	}
+	return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+
+static int
+nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod;
+	int i;
+
+	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
+			GFP_KERNEL);
+	if (!iod)
+		return -ENOMEM;
+
+	tgtport->iod = iod;
+
+	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
+		iod->tgtport = tgtport;
+		list_add_tail(&iod->ls_list, &tgtport->ls_list);
+
+		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
+			GFP_KERNEL);
+		if (!iod->rqstbuf)
+			goto out_fail;
+
+		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+
+		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+						NVME_FC_MAX_LS_BUFFER_SIZE,
+						DMA_TO_DEVICE);
+		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+			goto out_fail;
+	}
+
+	return 0;
+
+out_fail:
+	kfree(iod->rqstbuf);
+	list_del(&iod->ls_list);
+	for (iod--, i--; i >= 0; iod--, i--) {
+		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+		kfree(iod->rqstbuf);
+		list_del(&iod->ls_list);
+	}
+
+	kfree(iod);
+
+	return -EFAULT;
+}
+
+static void
+nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod = tgtport->iod;
+	int i;
+
+	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+		fc_dma_unmap_single(tgtport->dev,
+				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+				DMA_TO_DEVICE);
+		kfree(iod->rqstbuf);
+		list_del(&iod->ls_list);
+	}
+	kfree(tgtport->iod);
+}
+
+static struct nvmet_fc_ls_iod *
+nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	iod = list_first_entry_or_null(&tgtport->ls_list,
+					struct nvmet_fc_ls_iod, ls_list);
+	if (iod)
+		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	return iod;
+}
+
+
+static void
+nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_move(&iod->ls_list, &tgtport->ls_list);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+static void
+nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	int i;
+
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
+		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
+		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
+		fod->tgtport = tgtport;
+		fod->queue = queue;
+		fod->active = false;
+		fod->abort = false;
+		fod->aborted = false;
+		fod->fcpreq = NULL;
+		list_add_tail(&fod->fcp_list, &queue->fod_list);
+		spin_lock_init(&fod->flock);
+
+		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+			list_del(&fod->fcp_list);
+			for (fod--, i--; i >= 0; fod--, i--) {
+				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+						sizeof(fod->rspiubuf),
+						DMA_TO_DEVICE);
+				fod->rspdma = 0L;
+				list_del(&fod->fcp_list);
+			}
+
+			return;
+		}
+	}
+}
+
+static void
+nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	int i;
+
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		if (fod->rspdma)
+			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+	}
+}
+
+static struct nvmet_fc_fcp_iod *
+nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod;
+
+	lockdep_assert_held(&queue->qlock);
+
+	fod = list_first_entry_or_null(&queue->fod_list,
+					struct nvmet_fc_fcp_iod, fcp_list);
+	if (fod) {
+		list_del(&fod->fcp_list);
+		fod->active = true;
+		/*
+		 * no queue reference is taken, as it was taken by the
+		 * queue lookup just prior to the allocation. The iod
+		 * will "inherit" that reference.
+		 */
+	}
+	return fod;
+}
+
+
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+		       struct nvmet_fc_tgt_queue *queue,
+		       struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+	/*
+	 * Put all admin cmds on hw queue id 0. All io commands go to
+	 * their respective hw queue on a modulo basis.
+	 */
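+	/*
+	 * Worked example (illustrative): with ops->max_hw_queues = 4, io
+	 * queues map as qid 1 -> hwqid 0, qid 2 -> 1, qid 3 -> 2,
+	 * qid 4 -> 3, qid 5 -> 0, i.e. (qid - 1) % 4.
+	 */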
+	fcpreq->hwqid = queue->qid ?
+			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+		queue_work_on(queue->cpu, queue->work_q, &fod->work);
+	else
+		nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
+nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
+{
+	struct nvmet_fc_fcp_iod *fod =
+		container_of(work, struct nvmet_fc_fcp_iod, defer_work);
+
+	/* Submit deferred IO for processing */
+	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
+
+}
+
+static void
+nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+			struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
+	unsigned long flags;
+
+	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+
+	fcpreq->nvmet_fc_private = NULL;
+
+	fod->active = false;
+	fod->abort = false;
+	fod->aborted = false;
+	fod->writedataactive = false;
+	fod->fcpreq = NULL;
+
+	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+	/* release the queue lookup reference on the completed IO */
+	nvmet_fc_tgt_q_put(queue);
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+	if (!deferfcp) {
+		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+		return;
+	}
+
+	/* Re-use the fod for the next pending cmd that was deferred */
+	list_del(&deferfcp->req_list);
+
+	fcpreq = deferfcp->fcp_req;
+
+	/* deferfcp can be reused for another IO at a later date */
+	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	/* Save the NVME CMD IU in the fod */
+	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+	/* Setup new fcpreq to be processed */
+	fcpreq->rspaddr = NULL;
+	fcpreq->rsplen  = 0;
+	fcpreq->nvmet_fc_private = fod;
+	fod->fcpreq = fcpreq;
+	fod->active = true;
+
+	/* inform LLDD IO is now being processed */
+	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+	/*
+	 * Leave the queue lookup get reference taken when
+	 * fod was originally allocated.
+	 */
+
+	queue_work(queue->work_q, &fod->defer_work);
+}
+
+static int
+nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
+{
+	int cpu, idx, cnt;
+
+	if (tgtport->ops->max_hw_queues == 1)
+		return WORK_CPU_UNBOUND;
+
+	/* Simple cpu selection based on qid modulo active cpu count */
+	idx = !qid ? 0 : (qid - 1) % num_active_cpus();
+
+	/* find the n'th active cpu */
+	for (cpu = 0, cnt = 0; ; ) {
+		if (cpu_active(cpu)) {
+			if (cnt == idx)
+				break;
+			cnt++;
+		}
+		cpu = (cpu + 1) % num_possible_cpus();
+	}
+
+	return cpu;
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+			u16 qid, u16 sqsize)
+{
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	int ret;
+
+	if (qid > NVMET_NR_QUEUES)
+		return NULL;
+
+	queue = kzalloc((sizeof(*queue) +
+				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
+				GFP_KERNEL);
+	if (!queue)
+		return NULL;
+
+	if (!nvmet_fc_tgt_a_get(assoc))
+		goto out_free_queue;
+
+	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+				assoc->tgtport->fc_target_port.port_num,
+				assoc->a_id, qid);
+	if (!queue->work_q)
+		goto out_a_put;
+
+	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
+	queue->qid = qid;
+	queue->sqsize = sqsize;
+	queue->assoc = assoc;
+	queue->port = assoc->tgtport->port;
+	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
+	INIT_LIST_HEAD(&queue->fod_list);
+	INIT_LIST_HEAD(&queue->avail_defer_list);
+	INIT_LIST_HEAD(&queue->pending_cmd_list);
+	atomic_set(&queue->connected, 0);
+	atomic_set(&queue->sqtail, 0);
+	atomic_set(&queue->rsn, 1);
+	atomic_set(&queue->zrspcnt, 0);
+	spin_lock_init(&queue->qlock);
+	kref_init(&queue->ref);
+
+	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
+
+	ret = nvmet_sq_init(&queue->nvme_sq);
+	if (ret)
+		goto out_fail_iodlist;
+
+	WARN_ON(assoc->queues[qid]);
+	spin_lock_irqsave(&assoc->tgtport->lock, flags);
+	assoc->queues[qid] = queue;
+	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+
+	return queue;
+
+out_fail_iodlist:
+	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+	destroy_workqueue(queue->work_q);
+out_a_put:
+	nvmet_fc_tgt_a_put(assoc);
+out_free_queue:
+	kfree(queue);
+	return NULL;
+}
+
+
+static void
+nvmet_fc_tgt_queue_free(struct kref *ref)
+{
+	struct nvmet_fc_tgt_queue *queue =
+		container_of(ref, struct nvmet_fc_tgt_queue, ref);
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
+	queue->assoc->queues[queue->qid] = NULL;
+	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+
+	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+	nvmet_fc_tgt_a_put(queue->assoc);
+
+	destroy_workqueue(queue->work_q);
+
+	kfree(queue);
+}
+
+static void
+nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
+{
+	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
+}
+
+static int
+nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
+{
+	return kref_get_unless_zero(&queue->ref);
+}
+
+
+static void
+nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
+	unsigned long flags;
+	int i, writedataactive;
+	bool disconnect;
+
+	disconnect = atomic_xchg(&queue->connected, 0);
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	/* abort outstanding io's */
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		if (fod->active) {
+			spin_lock(&fod->flock);
+			fod->abort = true;
+			writedataactive = fod->writedataactive;
+			spin_unlock(&fod->flock);
+			/*
+			 * only call lldd abort routine if waiting for
+			 * writedata. other outstanding ops should finish
+			 * on their own.
+			 */
+			if (writedataactive) {
+				spin_lock(&fod->flock);
+				fod->aborted = true;
+				spin_unlock(&fod->flock);
+				tgtport->ops->fcp_abort(
+					&tgtport->fc_target_port, fod->fcpreq);
+			}
+		}
+	}
+
+	/* Clean up deferred IOs in the queue */
+	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+				req_list) {
+		list_del(&deferfcp->req_list);
+		kfree(deferfcp);
+	}
+
+	for (;;) {
+		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+		if (!deferfcp)
+			break;
+
+		list_del(&deferfcp->req_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+
+		kfree(deferfcp);
+
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	flush_workqueue(queue->work_q);
+
+	if (disconnect)
+		nvmet_sq_destroy(&queue->nvme_sq);
+
+	nvmet_fc_tgt_q_put(queue);
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+				u64 connection_id)
+{
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_queue *queue;
+	u64 association_id = nvmet_fc_getassociationid(connection_id);
+	u16 qid = nvmet_fc_getqueueid(connection_id);
+	unsigned long flags;
+
+	if (qid > NVMET_NR_QUEUES)
+		return NULL;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		if (association_id == assoc->association_id) {
+			queue = assoc->queues[qid];
+			if (queue &&
+			    (!atomic_read(&queue->connected) ||
+			     !nvmet_fc_tgt_q_get(queue)))
+				queue = NULL;
+			spin_unlock_irqrestore(&tgtport->lock, flags);
+			return queue;
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	return NULL;
+}
+
+static void
+nvmet_fc_delete_assoc(struct work_struct *work)
+{
+	struct nvmet_fc_tgt_assoc *assoc =
+		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+
+	nvmet_fc_delete_target_assoc(assoc);
+	nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+	unsigned long flags;
+	u64 ran;
+	int idx;
+	bool needrandom = true;
+
+	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+	if (!assoc)
+		return NULL;
+
+	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0)
+		goto out_free_assoc;
+
+	if (!nvmet_fc_tgtport_get(tgtport))
+		goto out_ida_put;
+
+	assoc->tgtport = tgtport;
+	assoc->a_id = idx;
+	INIT_LIST_HEAD(&assoc->a_list);
+	kref_init(&assoc->ref);
+	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+
+	while (needrandom) {
+		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
+		ran = ran << BYTES_FOR_QID_SHIFT;
+
+		spin_lock_irqsave(&tgtport->lock, flags);
+		needrandom = false;
+		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+			if (ran == tmpassoc->association_id) {
+				needrandom = true;
+				break;
+			}
+		if (!needrandom) {
+			assoc->association_id = ran;
+			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+		}
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+	}
+
+	return assoc;
+
+out_ida_put:
+	ida_simple_remove(&tgtport->assoc_cnt, idx);
+out_free_assoc:
+	kfree(assoc);
+	return NULL;
+}
+
+static void
+nvmet_fc_target_assoc_free(struct kref *ref)
+{
+	struct nvmet_fc_tgt_assoc *assoc =
+		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_del(&assoc->a_list);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+	kfree(assoc);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
+{
+	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
+}
+
+static int
+nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
+{
+	return kref_get_unless_zero(&assoc->ref);
+}
+
+static void
+nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+		queue = assoc->queues[i];
+		if (queue) {
+			if (!nvmet_fc_tgt_q_get(queue))
+				continue;
+			spin_unlock_irqrestore(&tgtport->lock, flags);
+			nvmet_fc_delete_target_queue(queue);
+			nvmet_fc_tgt_q_put(queue);
+			spin_lock_irqsave(&tgtport->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+
+	nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
+				u64 association_id)
+{
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_assoc *ret = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		if (association_id == assoc->association_id) {
+			ret = assoc;
+			nvmet_fc_tgt_a_get(assoc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+
+	return ret;
+}
+
+
+/**
+ * nvmet_fc_register_targetport - transport entry point called by an
+ *                              LLDD to register the existence of a local
+ *                              NVME subsystem FC port.
+ * @pinfo:     pointer to information about the port to be registered
+ * @template:  LLDD entrypoints and operational parameters for the port
+ * @dev:       physical hardware device node port corresponds to. Will be
+ *             used for DMA mappings
+ * @portptr:   pointer to a local port pointer. Upon success, the routine
+ *             will allocate a nvmet_fc_target_port structure and place its
+ *             address in the local port pointer. Upon failure, local port
+ *             pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+			struct nvmet_fc_target_template *template,
+			struct device *dev,
+			struct nvmet_fc_target_port **portptr)
+{
+	struct nvmet_fc_tgtport *newrec;
+	unsigned long flags;
+	int ret, idx;
+
+	if (!template->xmt_ls_rsp || !template->fcp_op ||
+	    !template->fcp_abort ||
+	    !template->fcp_req_release || !template->targetport_delete ||
+	    !template->max_hw_queues || !template->max_sgl_segments ||
+	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+		ret = -EINVAL;
+		goto out_regtgt_failed;
+	}
+
+	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
+			 GFP_KERNEL);
+	if (!newrec) {
+		ret = -ENOMEM;
+		goto out_regtgt_failed;
+	}
+
+	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out_fail_kfree;
+	}
+
+	if (!get_device(dev) && dev) {
+		ret = -ENODEV;
+		goto out_ida_put;
+	}
+
+	newrec->fc_target_port.node_name = pinfo->node_name;
+	newrec->fc_target_port.port_name = pinfo->port_name;
+	newrec->fc_target_port.private = &newrec[1];
+	newrec->fc_target_port.port_id = pinfo->port_id;
+	newrec->fc_target_port.port_num = idx;
+	INIT_LIST_HEAD(&newrec->tgt_list);
+	newrec->dev = dev;
+	newrec->ops = template;
+	spin_lock_init(&newrec->lock);
+	INIT_LIST_HEAD(&newrec->ls_list);
+	INIT_LIST_HEAD(&newrec->ls_busylist);
+	INIT_LIST_HEAD(&newrec->assoc_list);
+	kref_init(&newrec->ref);
+	ida_init(&newrec->assoc_cnt);
+	newrec->max_sg_cnt = template->max_sgl_segments;
+
+	ret = nvmet_fc_alloc_ls_iodlist(newrec);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_free_newrec;
+	}
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	*portptr = &newrec->fc_target_port;
+	return 0;
+
+out_free_newrec:
+	put_device(dev);
+out_ida_put:
+	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+out_fail_kfree:
+	kfree(newrec);
+out_regtgt_failed:
+	*portptr = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
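+
+/*
+ * Illustrative registration sketch (hypothetical LLDD names, not part of
+ * the transport): an LLDD fills a nvmet_fc_target_template with its
+ * entrypoints and limits, then registers each local FC port it exposes:
+ *
+ *	static struct nvmet_fc_target_template lldd_tgt_template = {
+ *		.targetport_delete	= lldd_targetport_delete,
+ *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
+ *		.fcp_op			= lldd_fcp_op,
+ *		.fcp_abort		= lldd_fcp_abort,
+ *		.fcp_req_release	= lldd_fcp_req_release,
+ *		.max_hw_queues		= 4,
+ *		.max_sgl_segments	= 256,
+ *		.max_dif_sgl_segments	= 256,
+ *		.dma_boundary		= 0xFFFFFFFF,
+ *		.target_priv_sz		= sizeof(struct lldd_tgt_priv),
+ *	};
+ *
+ *	struct nvmet_fc_port_info pinfo = {
+ *		.node_name	= lldd_port->wwnn,
+ *		.port_name	= lldd_port->wwpn,
+ *		.port_id	= lldd_port->d_id,
+ *	};
+ *	struct nvmet_fc_target_port *tgtport;
+ *	int ret;
+ *
+ *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
+ *					   &lldd_pcidev->dev, &tgtport);
+ */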
+
+
+static void
+nvmet_fc_free_tgtport(struct kref *ref)
+{
+	struct nvmet_fc_tgtport *tgtport =
+		container_of(ref, struct nvmet_fc_tgtport, ref);
+	struct device *dev = tgtport->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_del(&tgtport->tgt_list);
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	nvmet_fc_free_ls_iodlist(tgtport);
+
+	/* let the LLDD know we've finished tearing it down */
+	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
+
+	ida_simple_remove(&nvmet_fc_tgtport_cnt,
+			tgtport->fc_target_port.port_num);
+
+	ida_destroy(&tgtport->assoc_cnt);
+
+	kfree(tgtport);
+
+	put_device(dev);
+}
+
+static void
+nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
+{
+	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
+}
+
+static int
+nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
+{
+	return kref_get_unless_zero(&tgtport->ref);
+}
+
+static void
+__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_tgt_assoc *assoc, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry_safe(assoc, next,
+				&tgtport->assoc_list, a_list) {
+		if (!nvmet_fc_tgt_a_get(assoc))
+			continue;
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+		nvmet_fc_delete_target_assoc(assoc);
+		nvmet_fc_tgt_a_put(assoc);
+		spin_lock_irqsave(&tgtport->lock, flags);
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+/*
+ * nvmet layer has called to terminate an association
+ */
+static void
+nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_fc_tgtport *tgtport, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	bool found_ctrl = false;
+
+	/* this is a bit ugly, but don't want to make locks layered */
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
+			tgt_list) {
+		if (!nvmet_fc_tgtport_get(tgtport))
+			continue;
+		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+		spin_lock_irqsave(&tgtport->lock, flags);
+		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+			queue = assoc->queues[0];
+			if (queue && queue->nvme_sq.ctrl == ctrl) {
+				if (nvmet_fc_tgt_a_get(assoc))
+					found_ctrl = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+
+		nvmet_fc_tgtport_put(tgtport);
+
+		if (found_ctrl) {
+			schedule_work(&assoc->del_work);
+			return;
+		}
+
+		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_unregister_targetport - transport entry point called by an
+ *                              LLDD to deregister/remove a previously
+ *                              registered local NVME subsystem FC port.
+ * @target_port: pointer to the (registered) target port that is to be
+ *               deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+
+	/* terminate any outstanding associations */
+	__nvmet_fc_free_assocs(tgtport);
+
+	nvmet_fc_tgtport_put(tgtport);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+
+static void
+nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
+{
+	struct fcnvme_ls_acc_hdr *acc = buf;
+
+	acc->w0.ls_cmd = ls_cmd;
+	acc->desc_list_len = desc_len;
+	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+	acc->rqst.desc_len =
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static int
+nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+			u8 reason, u8 explanation, u8 vendor)
+{
+	struct fcnvme_ls_rjt *rjt = buf;
+
+	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+			ls_cmd);
+	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+	rjt->rjt.reason_code = reason;
+	rjt->rjt.reason_explanation = explanation;
+	rjt->rjt.vendor = vendor;
+
+	return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+	VERR_NO_ERROR		= 0,
+	VERR_CR_ASSOC_LEN	= 1,
+	VERR_CR_ASSOC_RQST_LEN	= 2,
+	VERR_CR_ASSOC_CMD	= 3,
+	VERR_CR_ASSOC_CMD_LEN	= 4,
+	VERR_ERSP_RATIO		= 5,
+	VERR_ASSOC_ALLOC_FAIL	= 6,
+	VERR_QUEUE_ALLOC_FAIL	= 7,
+	VERR_CR_CONN_LEN	= 8,
+	VERR_CR_CONN_RQST_LEN	= 9,
+	VERR_ASSOC_ID		= 10,
+	VERR_ASSOC_ID_LEN	= 11,
+	VERR_NO_ASSOC		= 12,
+	VERR_CONN_ID		= 13,
+	VERR_CONN_ID_LEN	= 14,
+	VERR_NO_CONN		= 15,
+	VERR_CR_CONN_CMD	= 16,
+	VERR_CR_CONN_CMD_LEN	= 17,
+	VERR_DISCONN_LEN	= 18,
+	VERR_DISCONN_RQST_LEN	= 19,
+	VERR_DISCONN_CMD	= 20,
+	VERR_DISCONN_CMD_LEN	= 21,
+	VERR_DISCONN_SCOPE	= 22,
+	VERR_RS_LEN		= 23,
+	VERR_RS_RQST_LEN	= 24,
+	VERR_RS_CMD		= 25,
+	VERR_RS_CMD_LEN		= 26,
+	VERR_RS_RCTL		= 27,
+	VERR_RS_RO		= 28,
+};
+
+static char *validation_errors[] = {
+	"OK",
+	"Bad CR_ASSOC Length",
+	"Bad CR_ASSOC Rqst Length",
+	"Not CR_ASSOC Cmd",
+	"Bad CR_ASSOC Cmd Length",
+	"Bad Ersp Ratio",
+	"Association Allocation Failed",
+	"Queue Allocation Failed",
+	"Bad CR_CONN Length",
+	"Bad CR_CONN Rqst Length",
+	"Not Association ID",
+	"Bad Association ID Length",
+	"No Association",
+	"Not Connection ID",
+	"Bad Connection ID Length",
+	"No Connection",
+	"Not CR_CONN Cmd",
+	"Bad CR_CONN Cmd Length",
+	"Bad DISCONN Length",
+	"Bad DISCONN Rqst Length",
+	"Not DISCONN Cmd",
+	"Bad DISCONN Cmd Length",
+	"Bad Disconnect Scope",
+	"Bad RS Length",
+	"Bad RS Rqst Length",
+	"Not RS Cmd",
+	"Bad RS Cmd Length",
+	"Bad RS R_CTL",
+	"Bad RS Relative Offset",
+};
+
+static void
+nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_cr_assoc_rqst *rqst =
+				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_cr_assoc_acc *acc =
+				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue;
+	int ret = 0;
+
+	memset(acc, 0, sizeof(*acc));
+
+	/*
+	 * FC-NVME spec changes. There are initiators sending different
+	 * lengths as padding sizes for the Create Association Cmd
+	 * descriptor because the specified value was incorrect.
+	 * Accept anything of "minimum" length. Assume the format per the
+	 * 1.15 spec (with HOSTID reduced to 16 bytes), and ignore how
+	 * long the trailing pad is.
+	 */
+	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
+		ret = VERR_CR_ASSOC_LEN;
+	else if (be32_to_cpu(rqst->desc_list_len) <
+			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
+		ret = VERR_CR_ASSOC_RQST_LEN;
+	else if (rqst->assoc_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
+		ret = VERR_CR_ASSOC_CMD;
+	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
+		ret = VERR_CR_ASSOC_CMD_LEN;
+	else if (!rqst->assoc_cmd.ersp_ratio ||
+		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
+				be16_to_cpu(rqst->assoc_cmd.sqsize)))
+		ret = VERR_ERSP_RATIO;
+
+	else {
+		/* new association w/ admin queue */
+		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+		if (!iod->assoc)
+			ret = VERR_ASSOC_ALLOC_FAIL;
+		else {
+			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+					be16_to_cpu(rqst->assoc_cmd.sqsize));
+			if (!queue)
+				ret = VERR_QUEUE_ALLOC_FAIL;
+		}
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Create Association LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				FCNVME_RJT_RC_LOGIC,
+				FCNVME_RJT_EXP_NONE, 0);
+		return;
+	}
+
+	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
+	atomic_set(&queue->connected, 1);
+	queue->sqhd = 0;	/* best place to init value */
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_assoc_acc)),
+			FCNVME_LS_CREATE_ASSOCIATION);
+	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+	acc->associd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id));
+	acc->associd.association_id =
+			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
+	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+	acc->connectid.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_conn_id));
+	acc->connectid.connection_id = acc->associd.association_id;
+}
+
+static void
+nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_cr_conn_rqst *rqst =
+				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_cr_conn_acc *acc =
+				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue;
+	int ret = 0;
+
+	memset(acc, 0, sizeof(*acc));
+
+	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
+		ret = VERR_CR_CONN_LEN;
+	else if (rqst->desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_conn_rqst)))
+		ret = VERR_CR_CONN_RQST_LEN;
+	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+		ret = VERR_ASSOC_ID;
+	else if (rqst->associd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id)))
+		ret = VERR_ASSOC_ID_LEN;
+	else if (rqst->connect_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
+		ret = VERR_CR_CONN_CMD;
+	else if (rqst->connect_cmd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
+		ret = VERR_CR_CONN_CMD_LEN;
+	else if (!rqst->connect_cmd.ersp_ratio ||
+		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
+				be16_to_cpu(rqst->connect_cmd.sqsize)))
+		ret = VERR_ERSP_RATIO;
+
+	else {
+		/* new io queue */
+		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
+				be64_to_cpu(rqst->associd.association_id));
+		if (!iod->assoc)
+			ret = VERR_NO_ASSOC;
+		else {
+			queue = nvmet_fc_alloc_target_queue(iod->assoc,
+					be16_to_cpu(rqst->connect_cmd.qid),
+					be16_to_cpu(rqst->connect_cmd.sqsize));
+			if (!queue)
+				ret = VERR_QUEUE_ALLOC_FAIL;
+
+			/* release get taken in nvmet_fc_find_target_assoc */
+			nvmet_fc_tgt_a_put(iod->assoc);
+		}
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Create Connection LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				(ret == VERR_NO_ASSOC) ?
+					FCNVME_RJT_RC_INV_ASSOC :
+					FCNVME_RJT_RC_LOGIC,
+				FCNVME_RJT_EXP_NONE, 0);
+		return;
+	}
+
+	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
+	atomic_set(&queue->connected, 1);
+	queue->sqhd = 0;	/* best place to init value */
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
+			FCNVME_LS_CREATE_CONNECTION);
+	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+	acc->connectid.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_conn_id));
+	acc->connectid.connection_id =
+			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
+				be16_to_cpu(rqst->connect_cmd.qid)));
+}
+
+static void
+nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_disconnect_rqst *rqst =
+			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_disconnect_acc *acc =
+			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue = NULL;
+	struct nvmet_fc_tgt_assoc *assoc;
+	int ret = 0;
+	bool del_assoc = false;
+
+	memset(acc, 0, sizeof(*acc));
+
+	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+		ret = VERR_DISCONN_LEN;
+	else if (rqst->desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_disconnect_rqst)))
+		ret = VERR_DISCONN_RQST_LEN;
+	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+		ret = VERR_ASSOC_ID;
+	else if (rqst->associd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id)))
+		ret = VERR_ASSOC_ID_LEN;
+	else if (rqst->discon_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+		ret = VERR_DISCONN_CMD;
+	else if (rqst->discon_cmd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+		ret = VERR_DISCONN_CMD_LEN;
+	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
+			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+		ret = VERR_DISCONN_SCOPE;
+	else {
+		/* match an active association */
+		assoc = nvmet_fc_find_target_assoc(tgtport,
+				be64_to_cpu(rqst->associd.association_id));
+		iod->assoc = assoc;
+		if (assoc) {
+			if (rqst->discon_cmd.scope ==
+					FCNVME_DISCONN_CONNECTION) {
+				queue = nvmet_fc_find_target_queue(tgtport,
+						be64_to_cpu(
+							rqst->discon_cmd.id));
+				if (!queue) {
+					nvmet_fc_tgt_a_put(assoc);
+					ret = VERR_NO_CONN;
+				}
+			}
+		} else
+			ret = VERR_NO_ASSOC;
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Disconnect LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				(ret == VERR_NO_ASSOC) ?
+					FCNVME_RJT_RC_INV_ASSOC :
+					(ret == VERR_NO_CONN) ?
+						FCNVME_RJT_RC_INV_CONN :
+						FCNVME_RJT_RC_LOGIC,
+				FCNVME_RJT_EXP_NONE, 0);
+		return;
+	}
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_disconnect_acc)),
+			FCNVME_LS_DISCONNECT);
+
+
+	/* are we to delete a Connection ID (queue) */
+	if (queue) {
+		int qid = queue->qid;
+
+		nvmet_fc_delete_target_queue(queue);
+
+		/* release the get taken by find_target_queue */
+		nvmet_fc_tgt_q_put(queue);
+
+		/* tear association down if the admin queue (qid 0) terminated */
+		if (!qid)
+			del_assoc = true;
+	}
+
+	/* release get taken in nvmet_fc_find_target_assoc */
+	nvmet_fc_tgt_a_put(iod->assoc);
+
+	if (del_assoc)
+		nvmet_fc_delete_target_assoc(iod->assoc);
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
+
+static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+
+static void
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+{
+	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+	nvmet_fc_free_ls_iod(tgtport, iod);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_ls_iod *iod)
+{
+	int ret;
+
+	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+
+	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+	if (ret)
+		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLD
+ */
+static void
+nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_rqst_w0 *w0 =
+			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+
+	iod->lsreq->nvmet_fc_private = iod;
+	iod->lsreq->rspbuf = iod->rspbuf;
+	iod->lsreq->rspdma = iod->rspdma;
+	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+	/* Be preventative. Handlers will later set a valid length. */
+	iod->lsreq->rsplen = 0;
+
+	iod->assoc = NULL;
+
+	/*
+	 * handlers:
+	 *   parse request input, execute the request, and format the
+	 *   LS response
+	 */
+	switch (w0->ls_cmd) {
+	case FCNVME_LS_CREATE_ASSOCIATION:
+		/* Creates Association and initial Admin Queue/Connection */
+		nvmet_fc_ls_create_association(tgtport, iod);
+		break;
+	case FCNVME_LS_CREATE_CONNECTION:
+		/* Creates an IO Queue/Connection */
+		nvmet_fc_ls_create_connection(tgtport, iod);
+		break;
+	case FCNVME_LS_DISCONNECT:
+		/* Terminate a Queue/Connection or the Association */
+		nvmet_fc_ls_disconnect(tgtport, iod);
+		break;
+	default:
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
+				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+	}
+
+	nvmet_fc_xmt_ls_rsp(tgtport, iod);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLD
+ */
+static void
+nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+	struct nvmet_fc_ls_iod *iod =
+		container_of(work, struct nvmet_fc_ls_iod, work);
+	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+	nvmet_fc_handle_ls_rqst(tgtport, iod);
+}
+
+
+/**
+ * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
+ *                       upon the reception of an NVME LS request.
+ *
+ * The nvmet-fc layer will copy payload to an internal structure for
+ * processing.  As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the LS was
+ *               received on.
+ * @lsreq:      pointer to a lsreq request structure to be used to reference
+ *              the exchange corresponding to the LS.
+ * @lsreqbuf:   pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+			struct nvmefc_tgt_ls_req *lsreq,
+			void *lsreqbuf, u32 lsreqbuf_len)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+	struct nvmet_fc_ls_iod *iod;
+
+	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+		return -E2BIG;
+
+	if (!nvmet_fc_tgtport_get(tgtport))
+		return -ESHUTDOWN;
+
+	iod = nvmet_fc_alloc_ls_iod(tgtport);
+	if (!iod) {
+		nvmet_fc_tgtport_put(tgtport);
+		return -ENOENT;
+	}
+
+	iod->lsreq = lsreq;
+	iod->fcpreq = NULL;
+	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
+	iod->rqstdatalen = lsreqbuf_len;
+
+	schedule_work(&iod->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
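+
+/*
+ * Illustrative call sequence (hypothetical LLDD names): when the link
+ * delivers an NVME LS frame, the LLDD hands the payload to the transport
+ * and may recycle its receive buffer as soon as the call returns:
+ *
+ *	ret = nvmet_fc_rcv_ls_req(lldd_port->tgtport, &lldd_ls->ls_req,
+ *				  rx_buf, rx_len);
+ *	if (ret)
+ *		lldd_abort_ls_exchange(lldd_ls);
+ */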
+
+
+/*
+ * **********************
+ * Start of FCP handling
+ * **********************
+ */
+
+static int
+nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+	struct scatterlist *sg;
+	unsigned int nent;
+
+	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
+	if (!sg)
+		goto out;
+
+	fod->data_sg = sg;
+	fod->data_sg_cnt = nent;
+	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+				((fod->io_dir == NVMET_FCP_WRITE) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE));
+				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
+
+	return 0;
+
+out:
+	return NVME_SC_INTERNAL;
+}
+
+static void
+nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+	if (!fod->data_sg || !fod->data_sg_cnt)
+		return;
+
+	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+				((fod->io_dir == NVMET_FCP_WRITE) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE));
+	sgl_free(fod->data_sg);
+	fod->data_sg = NULL;
+	fod->data_sg_cnt = 0;
+}
+
+
+static bool
+queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
+{
+	u32 sqtail, used;
+
+	/* egad, this is ugly. And sqtail is just a best guess */
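+	/*
+	 * Worked example (illustrative): sqsize = 32, sqhd = 3 and a
+	 * sampled sqtail of 1 give used = 1 + 32 - 3 = 30, and
+	 * 30 * 10 = 300 >= (32 - 1) * 9 = 279, so the queue is treated
+	 * as 90% (or more) full and an ersp is forced.
+	 */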
+	sqtail = atomic_read(&q->sqtail) % q->sqsize;
+
+	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
+	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
+}
+
+/*
+ * Prep RSP payload.
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
+ */
+static void
+nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
+	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+	struct nvme_completion *cqe = &ersp->cqe;
+	u32 *cqewd = (u32 *)cqe;
+	bool send_ersp = false;
+	u32 rsn, rspcnt, xfr_length;
+
+	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
+		xfr_length = fod->req.transfer_len;
+	else
+		xfr_length = fod->offset;
+
+	/*
+	 * check to see if we can send a 0's rsp.
+	 *   Note: to send a 0's response, the NVME-FC host transport will
+	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
+	 *   seen in an ersp), and command_id. Thus it will create a
+	 *   zero-filled CQE with those known fields filled in. Transport
+	 *   must send an ersp for any condition where the cqe won't match
+	 *   this.
+	 *
+	 * Here are the FC-NVME mandated cases where we must send an ersp:
+	 *  every N responses, where N=ersp_ratio
+	 *  force fabric commands to send ersp's (not in FC-NVME but good
+	 *    practice)
+	 *  normal cmds: any time status is non-zero, or status is zero
+	 *     but words 0 or 1 are non-zero.
+	 *  the SQ is 90% or more full
+	 *  the cmd is a fused command
+	 *  transferred data length not equal to cmd iu length
+	 */
+	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
+	if (!(rspcnt % fod->queue->ersp_ratio) ||
+	    sqe->opcode == nvme_fabrics_command ||
+	    xfr_length != fod->req.transfer_len ||
+	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
+	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
+	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
+		send_ersp = true;
+
+	/* re-set the fields */
+	fod->fcpreq->rspaddr = ersp;
+	fod->fcpreq->rspdma = fod->rspdma;
+
+	if (!send_ersp) {
+		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
+		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
+	} else {
+		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
+		rsn = atomic_inc_return(&fod->queue->rsn);
+		ersp->rsn = cpu_to_be32(rsn);
+		ersp->xfrd_len = cpu_to_be32(xfr_length);
+		fod->fcpreq->rsplen = sizeof(*ersp);
+	}
+
+	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+}
+
+static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
+
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+
+	/* data no longer needed */
+	nvmet_fc_free_tgt_pgs(fod);
+
+	/*
+	 * if an ABTS was received or we issued the fcp_abort early
+	 * don't call abort routine again.
+	 */
+	/* no need to take lock - lock was taken earlier to get here */
+	if (!fod->aborted)
+		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
+
+	nvmet_fc_free_fcp_iod(fod->queue, fod);
+}
+
+static void
+nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod)
+{
+	int ret;
+
+	fod->fcpreq->op = NVMET_FCOP_RSP;
+	fod->fcpreq->timeout = 0;
+
+	nvmet_fc_prep_fcp_rsp(tgtport, fod);
+
+	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+	if (ret)
+		nvmet_fc_abort_op(tgtport, fod);
+}
+
+static void
+nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod, u8 op)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
+	unsigned long flags;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
+	int ret;
+
+	fcpreq->op = op;
+	fcpreq->offset = fod->offset;
+	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+
+	/*
+	 * for next sequence:
+	 *  break at a sg element boundary
+	 *  attempt to keep sequence length capped at
+	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *    be longer if a single sg element is larger
+	 *    than that amount. This is done to avoid creating
+	 *    a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
+	fcpreq->transfer_length = tlen;
+	fcpreq->transferred_length = 0;
+	fcpreq->fcp_error = 0;
+	fcpreq->rsplen = 0;
+
+	/*
+	 * If the last READDATA request: check if LLDD supports
+	 * combined xfr with response.
+	 */
+	if ((op == NVMET_FCOP_READDATA) &&
+	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
+	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
+		fcpreq->op = NVMET_FCOP_READDATA_RSP;
+		nvmet_fc_prep_fcp_rsp(tgtport, fod);
+	}
+
+	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+	if (ret) {
+		/*
+		 * should be ok to set w/o lock as it's in the thread of
+		 * execution (not an async timer routine) and doesn't
+		 * contend with any clearing action
+		 */
+		fod->abort = true;
+
+		if (op == NVMET_FCOP_WRITEDATA) {
+			spin_lock_irqsave(&fod->flock, flags);
+			fod->writedataactive = false;
+			spin_unlock_irqrestore(&fod->flock, flags);
+			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+			fcpreq->fcp_error = ret;
+			fcpreq->transferred_length = 0;
+			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
+		}
+	}
+}
+
+static inline bool
+__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+	/* if in the middle of an io and we need to tear down */
+	if (abort) {
+		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
+			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+			return true;
+		}
+
+		nvmet_fc_abort_op(tgtport, fod);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * actual done handler for FCP operations when completed by the lldd
+ */
+static void
+nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+	unsigned long flags;
+	bool abort;
+
+	spin_lock_irqsave(&fod->flock, flags);
+	abort = fod->abort;
+	fod->writedataactive = false;
+	spin_unlock_irqrestore(&fod->flock, flags);
+
+	switch (fcpreq->op) {
+
+	case NVMET_FCOP_WRITEDATA:
+		if (__nvmet_fc_fod_op_abort(fod, abort))
+			return;
+		if (fcpreq->fcp_error ||
+		    fcpreq->transferred_length != fcpreq->transfer_length) {
+			spin_lock(&fod->flock);
+			fod->abort = true;
+			spin_unlock(&fod->flock);
+
+			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
+			return;
+		}
+
+		fod->offset += fcpreq->transferred_length;
+		if (fod->offset != fod->req.transfer_len) {
+			spin_lock_irqsave(&fod->flock, flags);
+			fod->writedataactive = true;
+			spin_unlock_irqrestore(&fod->flock, flags);
+
+			/* transfer the next chunk */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_WRITEDATA);
+			return;
+		}
+
+		/* data transfer complete, resume with nvmet layer */
+		nvmet_req_execute(&fod->req);
+		break;
+
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		if (__nvmet_fc_fod_op_abort(fod, abort))
+			return;
+		if (fcpreq->fcp_error ||
+		    fcpreq->transferred_length != fcpreq->transfer_length) {
+			nvmet_fc_abort_op(tgtport, fod);
+			return;
+		}
+
+		/* success */
+
+		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
+			/* data no longer needed */
+			nvmet_fc_free_tgt_pgs(fod);
+			nvmet_fc_free_fcp_iod(fod->queue, fod);
+			return;
+		}
+
+		fod->offset += fcpreq->transferred_length;
+		if (fod->offset != fod->req.transfer_len) {
+			/* transfer the next chunk */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_READDATA);
+			return;
+		}
+
+		/* data transfer complete, send response */
+
+		/* data no longer needed */
+		nvmet_fc_free_tgt_pgs(fod);
+
+		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+
+		break;
+
+	case NVMET_FCOP_RSP:
+		if (__nvmet_fc_fod_op_abort(fod, abort))
+			return;
+		nvmet_fc_free_fcp_iod(fod->queue, fod);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void
+nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
+{
+	struct nvmet_fc_fcp_iod *fod =
+		container_of(work, struct nvmet_fc_fcp_iod, done_work);
+
+	nvmet_fc_fod_op_done(fod);
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+	struct nvmet_fc_tgt_queue *queue = fod->queue;
+
+	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
+		/* context switch so completion is not in ISR context */
+		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
+	else
+		nvmet_fc_fod_op_done(fod);
+}
+
+/*
+ * actual completion handler after execution by the nvmet layer
+ */
+static void
+__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_fcp_iod *fod, int status)
+{
+	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
+	unsigned long flags;
+	bool abort;
+
+	spin_lock_irqsave(&fod->flock, flags);
+	abort = fod->abort;
+	spin_unlock_irqrestore(&fod->flock, flags);
+
+	/* if we have a CQE, snoop the last sq_head value */
+	if (!status)
+		fod->queue->sqhd = cqe->sq_head;
+
+	if (abort) {
+		nvmet_fc_abort_op(tgtport, fod);
+		return;
+	}
+
+	/* if an error handling the cmd post initial parsing */
+	if (status) {
+		/* fudge up a failed CQE status for our transport error */
+		memset(cqe, 0, sizeof(*cqe));
+		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
+		cqe->sq_id = cpu_to_le16(fod->queue->qid);
+		cqe->command_id = sqe->command_id;
+		cqe->status = cpu_to_le16(status);
+	} else {
+
+		/*
+		 * try to push the data even if the SQE status is non-zero.
+		 * There may be a status for which data was still intended
+		 * to be moved.
+		 */
+		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+			/* push the data over before sending rsp */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_READDATA);
+			return;
+		}
+
+		/* writes & no data - fall thru */
+	}
+
+	/* data no longer needed */
+	nvmet_fc_free_tgt_pgs(fod);
+
+	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+}
+
+
+static void
+nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
+{
+	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
+}
+
+
+/*
+ * Actual processing routine for received FC-NVME FCP requests from the LLDD
+ */
+static void
+nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
+	int ret;
+
+	/*
+	 * Fused commands are currently not supported in the linux
+	 * implementation.
+	 *
+	 * As such, the FC transport implementation does not inspect
+	 * fused commands or hold delivery to the upper layer until both
+	 * commands of the pair have been received (ordered by csn).
+	 */
+
+	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
+
+	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
+		fod->io_dir = NVMET_FCP_WRITE;
+		if (!nvme_is_write(&cmdiu->sqe))
+			goto transport_error;
+	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
+		fod->io_dir = NVMET_FCP_READ;
+		if (nvme_is_write(&cmdiu->sqe))
+			goto transport_error;
+	} else {
+		fod->io_dir = NVMET_FCP_NODATA;
+		if (xfrlen)
+			goto transport_error;
+	}
+
+	fod->req.cmd = &fod->cmdiubuf.sqe;
+	fod->req.rsp = &fod->rspiubuf.cqe;
+	fod->req.port = fod->queue->port;
+
+	/* clear any response payload */
+	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+
+	fod->data_sg = NULL;
+	fod->data_sg_cnt = 0;
+
+	ret = nvmet_req_init(&fod->req,
+				&fod->queue->nvme_cq,
+				&fod->queue->nvme_sq,
+				&nvmet_fc_tgt_fcp_ops);
+	if (!ret) {
+		/* bad SQE content or invalid ctrl state */
+		/* nvmet layer has already called op done to send rsp. */
+		return;
+	}
+
+	fod->req.transfer_len = xfrlen;
+
+	/* keep a running counter of tail position */
+	atomic_inc(&fod->queue->sqtail);
+
+	if (fod->req.transfer_len) {
+		ret = nvmet_fc_alloc_tgt_pgs(fod);
+		if (ret) {
+			nvmet_req_complete(&fod->req, ret);
+			return;
+		}
+	}
+	fod->req.sg = fod->data_sg;
+	fod->req.sg_cnt = fod->data_sg_cnt;
+	fod->offset = 0;
+
+	if (fod->io_dir == NVMET_FCP_WRITE) {
+		/* pull the data over before invoking nvmet layer */
+		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
+		return;
+	}
+
+	/*
+	 * Reads or no data:
+	 *
+	 * can invoke the nvmet_layer now. If read data, cmd completion will
+	 * push the data
+	 */
+	nvmet_req_execute(&fod->req);
+	return;
+
+transport_error:
+	nvmet_fc_abort_op(tgtport, fod);
+}
+
+/*
+ * Actual processing routine for received FC-NVME FCP requests from the LLDD
+ */
+static void
+nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
+{
+	struct nvmet_fc_fcp_iod *fod =
+		container_of(work, struct nvmet_fc_fcp_iod, work);
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+	nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+/**
+ * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
+ *                       upon the reception of an NVME FCP CMD IU.
+ *
+ * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
+ * layer for processing.
+ *
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, in some circumstances, due to the packetized nature of FC
+ * and the api of the FC LLDD (which may issue a hw command to send the
+ * response but not get the hw completion for that command before a new
+ * command is asynchronously received), it's possible for a command to
+ * be received before the LLDD and nvmet_fc have recycled the job
+ * structure. This gives the appearance of more commands received than
+ * fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated, the
+ * LLDD request and CMD IU buffer information are remembered, and the
+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job
+ * structure is freed, it is immediately reallocated for anything on the
+ * pending request list. The LLDD's defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
+ *
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ *              was received on.
+ * @fcpreq:     pointer to a fcpreq request structure to be used to reference
+ *              the exchange corresponding to the FCP Exchange.
+ * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
+ * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
+ */
+int
+nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
+			struct nvmefc_tgt_fcp_req *fcpreq,
+			void *cmdiubuf, u32 cmdiubuf_len)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
+	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
+	unsigned long flags;
+
+	/* validate iu, so the connection id can be used to find the queue */
+	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
+			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
+			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
+		return -EIO;
+
+	queue = nvmet_fc_find_target_queue(tgtport,
+				be64_to_cpu(cmdiu->connection_id));
+	if (!queue)
+		return -ENOTCONN;
+
+	/*
+	 * note: reference taken by find_target_queue
+	 * After successful fod allocation, the fod will inherit the
+	 * ownership of that reference and will remove the reference
+	 * when the fod is freed.
+	 */
+
+	spin_lock_irqsave(&queue->qlock, flags);
+
+	fod = nvmet_fc_alloc_fcp_iod(queue);
+	if (fod) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		fcpreq->nvmet_fc_private = fod;
+		fod->fcpreq = fcpreq;
+
+		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+		return 0;
+	}
+
+	if (!tgtport->ops->defer_rcv) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+		return -ENOENT;
+	}
+
+	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+			struct nvmet_fc_defer_fcp_req, req_list);
+	if (deferfcp) {
+		/* Just re-use one that was previously allocated */
+		list_del(&deferfcp->req_list);
+	} else {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		/* Now we need to dynamically allocate one */
+		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+		if (!deferfcp) {
+			/* release the queue lookup reference */
+			nvmet_fc_tgt_q_put(queue);
+			return -ENOMEM;
+		}
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
+
+	/* For now, use rspaddr / rsplen to save payload information */
+	fcpreq->rspaddr = cmdiubuf;
+	fcpreq->rsplen  = cmdiubuf_len;
+	deferfcp->fcp_req = fcpreq;
+
+	/* defer processing till a fod becomes available */
+	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+	/* NOTE: the queue lookup reference is still valid */
+
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	return -EOVERFLOW;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
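+
+/*
+ * Illustrative receive path (hypothetical LLDD names): the LLDD may reuse
+ * the CMD IU buffer immediately on a 0 return, must hold it until its
+ * defer_rcv() callback runs on -EOVERFLOW, and should abort the exchange
+ * on any other error:
+ *
+ *	ret = nvmet_fc_rcv_fcp_req(lldd_port->tgtport, &ctx->tgt_fcp_req,
+ *				   cmd_iu_buf, cmd_iu_len);
+ *	switch (ret) {
+ *	case 0:
+ *		lldd_free_rx_buf(ctx);	// transport copied the CMD IU
+ *		break;
+ *	case -EOVERFLOW:
+ *		break;			// keep buffer until defer_rcv()
+ *	default:
+ *		lldd_abort_exchange(ctx);
+ *	}
+ */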
+
+/**
+ * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
+ *                       upon the reception of an ABTS for a FCP command
+ *
+ * Notify the transport that an ABTS has been received for a FCP command
+ * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
+ * LLDD believes the command is still being worked on
+ * (template_ops->fcp_req_release() has not been called).
+ *
+ * The transport will wait for any outstanding work (an op to the LLDD,
+ * which the lldd should complete with error due to the ABTS; or the
+ * completion from the nvmet layer of the nvme command), then will
+ * stop processing and call the LLDD's fcp_req_release() callback to
+ * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
+ * to the ABTS either after return from this function (assuming any
+ * outstanding op work has been terminated) or upon the callback being
+ * called.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ *              was received on.
+ * @fcpreq:     pointer to the fcpreq request structure that corresponds
+ *              to the exchange that received the ABTS.
+ */
+void
+nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
+			struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+
+	if (!fod || fod->fcpreq != fcpreq)
+		/* job appears to have already completed, ignore abort */
+		return;
+
+	queue = fod->queue;
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	if (fod->active) {
+		/*
+		 * mark as aborted. The abort handler, invoked upon completion
+		 * of any work, will detect the aborted status and do the
+		 * callback.
+		 */
+		spin_lock(&fod->flock);
+		fod->abort = true;
+		fod->aborted = true;
+		spin_unlock(&fod->flock);
+	}
+	spin_unlock_irqrestore(&queue->qlock, flags);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
+
+
+struct nvmet_fc_traddr {
+	u64	nn;
+	u64	pn;
+};
+
+static int
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
+{
+	u64 token64;
+
+	if (match_u64(sstr, &token64))
+		return -EINVAL;
+	*val = token64;
+
+	return 0;
+}
+
+/*
+ * This routine validates and extracts the WWNs from the TRADDR string.
+ * As the kernel parsers need a 0x prefix to determine the number base,
+ * always build the string to parse with a 0x prefix before parsing.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+	substring_t wwn = { name, &name[sizeof(name)-1] };
+	int nnoffset, pnoffset;
+
+	/* validate that the string is one of the 2 allowed formats */
+	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+		nnoffset = NVME_FC_TRADDR_OXNNLEN;
+		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+						NVME_FC_TRADDR_OXNNLEN;
+	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+				"pn-", NVME_FC_TRADDR_NNLEN))) {
+		nnoffset = NVME_FC_TRADDR_NNLEN;
+		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+	} else
+		goto out_einval;
+
+	name[0] = '0';
+	name[1] = 'x';
+	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+		goto out_einval;
+
+	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+		goto out_einval;
+
+	return 0;
+
+out_einval:
+	pr_warn("%s: bad traddr string\n", __func__);
+	return -EINVAL;
+}
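For orientation, the two layouts accepted above differ only in the optional 0x prefix; each name carries 16 hex digits, with a single separator character (conventionally ':') between the nn- and pn- parts. The WWN digits below are invented example values:

/* Illustrative traddr strings only; the WWN values are invented. */
static const char traddr_with_prefix[] =
	"nn-0x20000090fa945555:pn-0x10000090fa945555";
static const char traddr_without_prefix[] =
	"nn-20000090fa945555:pn-10000090fa945555";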
+
+static int
+nvmet_fc_add_port(struct nvmet_port *port)
+{
+	struct nvmet_fc_tgtport *tgtport;
+	struct nvmet_fc_traddr traddr = { 0L, 0L };
+	unsigned long flags;
+	int ret;
+
+	/* validate the address info */
+	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
+	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
+		return -EINVAL;
+
+	/* map the traddr address info to a target port */
+
+	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
+			sizeof(port->disc_addr.traddr));
+	if (ret)
+		return ret;
+
+	ret = -ENXIO;
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
+		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
+		    (tgtport->fc_target_port.port_name == traddr.pn)) {
+			tgtport->port = port;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+	return ret;
+}
+
+static void
+nvmet_fc_remove_port(struct nvmet_port *port)
+{
+	/* nothing to do */
+}
+
+static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+	.owner			= THIS_MODULE,
+	.type			= NVMF_TRTYPE_FC,
+	.msdbd			= 1,
+	.add_port		= nvmet_fc_add_port,
+	.remove_port		= nvmet_fc_remove_port,
+	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
+	.delete_ctrl		= nvmet_fc_delete_ctrl,
+};
+
+static int __init nvmet_fc_init_module(void)
+{
+	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+}
+
+static void __exit nvmet_fc_exit_module(void)
+{
+	/* sanity check - all targetports should be removed */
+	if (!list_empty(&nvmet_fc_target_list))
+		pr_warn("%s: targetport list not empty\n", __func__);
+
+	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
+
+	ida_destroy(&nvmet_fc_tgtport_cnt);
+}
+
+module_init(nvmet_fc_init_module);
+module_exit(nvmet_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
new file mode 100644
index 0000000..5251689
--- /dev/null
+++ b/drivers/nvme/target/fcloop.c
@@ -0,0 +1,1388 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+
+#include "../host/nvme.h"
+#include "../target/nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+enum {
+	NVMF_OPT_ERR		= 0,
+	NVMF_OPT_WWNN		= 1 << 0,
+	NVMF_OPT_WWPN		= 1 << 1,
+	NVMF_OPT_ROLES		= 1 << 2,
+	NVMF_OPT_FCADDR		= 1 << 3,
+	NVMF_OPT_LPWWNN		= 1 << 4,
+	NVMF_OPT_LPWWPN		= 1 << 5,
+};
+
+struct fcloop_ctrl_options {
+	int			mask;
+	u64			wwnn;
+	u64			wwpn;
+	u32			roles;
+	u32			fcaddr;
+	u64			lpwwnn;
+	u64			lpwwpn;
+};
+
+static const match_table_t opt_tokens = {
+	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
+	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
+	{ NVMF_OPT_ROLES,	"roles=%d"	},
+	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
+	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
+	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
+	{ NVMF_OPT_ERR,		NULL		}
+};
+
+static int
+fcloop_parse_options(struct fcloop_ctrl_options *opts,
+		const char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, opt_tokens, args);
+		opts->mask |= token;
+		switch (token) {
+		case NVMF_OPT_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->wwnn = token64;
+			break;
+		case NVMF_OPT_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->wwpn = token64;
+			break;
+		case NVMF_OPT_ROLES:
+			if (match_int(args, &token)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->roles = token;
+			break;
+		case NVMF_OPT_FCADDR:
+			if (match_hex(args, &token)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->fcaddr = token;
+			break;
+		case NVMF_OPT_LPWWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->lpwwnn = token64;
+			break;
+		case NVMF_OPT_LPWWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->lpwwpn = token64;
+			break;
+		default:
+			pr_warn("unknown parameter or missing value '%s'\n", p);
+			ret = -EINVAL;
+			goto out_free_options;
+		}
+	}
+
+out_free_options:
+	kfree(options);
+	return ret;
+}
+
+
+static int
+fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
+		const char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
+	*nname = -1;
+	*pname = -1;
+
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, opt_tokens, args);
+		switch (token) {
+		case NVMF_OPT_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			*nname = token64;
+			break;
+		case NVMF_OPT_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			*pname = token64;
+			break;
+		default:
+			pr_warn("unknown parameter or missing value '%s'\n", p);
+			ret = -EINVAL;
+			goto out_free_options;
+		}
+	}
+
+out_free_options:
+	kfree(options);
+
+	if (!ret) {
+		if (*nname == -1)
+			return -EINVAL;
+		if (*pname == -1)
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+
+#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
+			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+
+static DEFINE_SPINLOCK(fcloop_lock);
+static LIST_HEAD(fcloop_lports);
+static LIST_HEAD(fcloop_nports);
+
+struct fcloop_lport {
+	struct nvme_fc_local_port *localport;
+	struct list_head lport_list;
+	struct completion unreg_done;
+};
+
+struct fcloop_lport_priv {
+	struct fcloop_lport *lport;
+};
+
+struct fcloop_rport {
+	struct nvme_fc_remote_port *remoteport;
+	struct nvmet_fc_target_port *targetport;
+	struct fcloop_nport *nport;
+	struct fcloop_lport *lport;
+};
+
+struct fcloop_tport {
+	struct nvmet_fc_target_port *targetport;
+	struct nvme_fc_remote_port *remoteport;
+	struct fcloop_nport *nport;
+	struct fcloop_lport *lport;
+};
+
+struct fcloop_nport {
+	struct fcloop_rport *rport;
+	struct fcloop_tport *tport;
+	struct fcloop_lport *lport;
+	struct list_head nport_list;
+	struct kref ref;
+	u64 node_name;
+	u64 port_name;
+	u32 port_role;
+	u32 port_id;
+};
+
+struct fcloop_lsreq {
+	struct fcloop_tport		*tport;
+	struct nvmefc_ls_req		*lsreq;
+	struct work_struct		work;
+	struct nvmefc_tgt_ls_req	tgt_ls_req;
+	int				status;
+};
+
+enum {
+	INI_IO_START		= 0,
+	INI_IO_ACTIVE		= 1,
+	INI_IO_ABORTED		= 2,
+	INI_IO_COMPLETED	= 3,
+};
+
+struct fcloop_fcpreq {
+	struct fcloop_tport		*tport;
+	struct nvmefc_fcp_req		*fcpreq;
+	spinlock_t			reqlock;
+	u16				status;
+	u32				inistate;
+	bool				active;
+	bool				aborted;
+	struct kref			ref;
+	struct work_struct		fcp_rcv_work;
+	struct work_struct		abort_rcv_work;
+	struct work_struct		tio_done_work;
+	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
+};
+
+struct fcloop_ini_fcpreq {
+	struct nvmefc_fcp_req		*fcpreq;
+	struct fcloop_fcpreq		*tfcp_req;
+	spinlock_t			inilock;
+};
+
+static inline struct fcloop_lsreq *
+tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+}
+
+static inline struct fcloop_fcpreq *
+tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
+}
+
+
+static int
+fcloop_create_queue(struct nvme_fc_local_port *localport,
+			unsigned int qidx, u16 qsize,
+			void **handle)
+{
+	*handle = localport;
+	return 0;
+}
+
+static void
+fcloop_delete_queue(struct nvme_fc_local_port *localport,
+			unsigned int idx, void *handle)
+{
+}
+
+
+/*
+ * Transmit of LS RSP done (e.g. buffers all set). call back up
+ * initiator "done" flows.
+ */
+static void
+fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+{
+	struct fcloop_lsreq *tls_req =
+		container_of(work, struct fcloop_lsreq, work);
+	struct fcloop_tport *tport = tls_req->tport;
+	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+	if (!tport || tport->remoteport)
+		lsreq->done(lsreq, tls_req->status);
+}
+
+static int
+fcloop_ls_req(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			struct nvmefc_ls_req *lsreq)
+{
+	struct fcloop_lsreq *tls_req = lsreq->private;
+	struct fcloop_rport *rport = remoteport->private;
+	int ret = 0;
+
+	tls_req->lsreq = lsreq;
+	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+
+	if (!rport->targetport) {
+		tls_req->status = -ECONNREFUSED;
+		tls_req->tport = NULL;
+		schedule_work(&tls_req->work);
+		return ret;
+	}
+
+	tls_req->status = 0;
+	tls_req->tport = rport->targetport->private;
+	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
+				 lsreq->rqstaddr, lsreq->rqstlen);
+
+	return ret;
+}
+
+static int
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+			struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
+		((lsreq->rsplen < tgt_lsreq->rsplen) ?
+				lsreq->rsplen : tgt_lsreq->rsplen));
+	tgt_lsreq->done(tgt_lsreq);
+
+	schedule_work(&tls_req->work);
+
+	return 0;
+}
+
+static void
+fcloop_tfcp_req_free(struct kref *ref)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(ref, struct fcloop_fcpreq, ref);
+
+	kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+	return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+			struct fcloop_fcpreq *tfcp_req, int status)
+{
+	struct fcloop_ini_fcpreq *inireq = NULL;
+
+	if (fcpreq) {
+		inireq = fcpreq->private;
+		spin_lock(&inireq->inilock);
+		inireq->tfcp_req = NULL;
+		spin_unlock(&inireq->inilock);
+
+		fcpreq->status = status;
+		fcpreq->done(fcpreq);
+	}
+
+	/* release original io reference on tgt struct */
+	fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	int ret = 0;
+	bool aborted = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+		tfcp_req->inistate = INI_IO_ACTIVE;
+		break;
+	case INI_IO_ABORTED:
+		aborted = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(aborted))
+		ret = -ECANCELED;
+	else
+		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+				&tfcp_req->tgt_fcp_req,
+				fcpreq->cmdaddr, fcpreq->cmdlen);
+	if (ret)
+		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+	return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+	struct nvmefc_fcp_req *fcpreq;
+	bool completed = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	switch (tfcp_req->inistate) {
+	case INI_IO_ABORTED:
+		break;
+	case INI_IO_COMPLETED:
+		completed = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(completed)) {
+		/* remove reference taken in original abort downcall */
+		fcloop_tfcp_req_put(tfcp_req);
+		return;
+	}
+
+	if (tfcp_req->tport->targetport)
+		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+					&tfcp_req->tgt_fcp_req);
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->fcpreq = NULL;
+	spin_unlock(&tfcp_req->reqlock);
+
+	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+	/* call_host_done releases reference for abort downcall */
+}
+
+/*
+ * FCP IO operation done by target completion.
+ * Call back up the initiator "done" flows.
+ */
+static void
+fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, tio_done_work);
+	struct nvmefc_fcp_req *fcpreq;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	tfcp_req->inistate = INI_IO_COMPLETED;
+	spin_unlock(&tfcp_req->reqlock);
+
+	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
+}
+
+
+static int
+fcloop_fcp_req(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			void *hw_queue_handle,
+			struct nvmefc_fcp_req *fcpreq)
+{
+	struct fcloop_rport *rport = remoteport->private;
+	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+	struct fcloop_fcpreq *tfcp_req;
+
+	if (!rport->targetport)
+		return -ECONNREFUSED;
+
+	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
+	if (!tfcp_req)
+		return -ENOMEM;
+
+	inireq->fcpreq = fcpreq;
+	inireq->tfcp_req = tfcp_req;
+	spin_lock_init(&inireq->inilock);
+
+	tfcp_req->fcpreq = fcpreq;
+	tfcp_req->tport = rport->targetport->private;
+	tfcp_req->inistate = INI_IO_START;
+	spin_lock_init(&tfcp_req->reqlock);
+	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+	kref_init(&tfcp_req->ref);
+
+	schedule_work(&tfcp_req->fcp_rcv_work);
+
+	return 0;
+}
+
+static void
+fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
+			struct scatterlist *io_sg, u32 offset, u32 length)
+{
+	void *data_p, *io_p;
+	u32 data_len, io_len, tlen;
+
+	io_p = sg_virt(io_sg);
+	io_len = io_sg->length;
+
+	for ( ; offset; ) {
+		tlen = min_t(u32, offset, io_len);
+		offset -= tlen;
+		io_len -= tlen;
+		if (!io_len) {
+			io_sg = sg_next(io_sg);
+			io_p = sg_virt(io_sg);
+			io_len = io_sg->length;
+		} else
+			io_p += tlen;
+	}
+
+	data_p = sg_virt(data_sg);
+	data_len = data_sg->length;
+
+	for ( ; length; ) {
+		tlen = min_t(u32, io_len, data_len);
+		tlen = min_t(u32, tlen, length);
+
+		if (op == NVMET_FCOP_WRITEDATA)
+			memcpy(data_p, io_p, tlen);
+		else
+			memcpy(io_p, data_p, tlen);
+
+		length -= tlen;
+
+		io_len -= tlen;
+		if ((!io_len) && (length)) {
+			io_sg = sg_next(io_sg);
+			io_p = sg_virt(io_sg);
+			io_len = io_sg->length;
+		} else
+			io_p += tlen;
+
+		data_len -= tlen;
+		if ((!data_len) && (length)) {
+			data_sg = sg_next(data_sg);
+			data_p = sg_virt(data_sg);
+			data_len = data_sg->length;
+		} else
+			data_p += tlen;
+	}
+}
+
+static int
+fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+	struct nvmefc_fcp_req *fcpreq;
+	u32 rsplen = 0, xfrlen = 0;
+	int fcp_err = 0, active, aborted;
+	u8 op = tgt_fcpreq->op;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	active = tfcp_req->active;
+	aborted = tfcp_req->aborted;
+	tfcp_req->active = true;
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(active))
+		/* illegal - call while i/o active */
+		return -EALREADY;
+
+	if (unlikely(aborted)) {
+		/* target transport has aborted i/o prior */
+		spin_lock(&tfcp_req->reqlock);
+		tfcp_req->active = false;
+		spin_unlock(&tfcp_req->reqlock);
+		tgt_fcpreq->transferred_length = 0;
+		tgt_fcpreq->fcp_error = -ECANCELED;
+		tgt_fcpreq->done(tgt_fcpreq);
+		return 0;
+	}
+
+	/*
+	 * if fcpreq is NULL, the I/O has been aborted (from
+	 * initiator side). For the target side, act as if all is well
+	 * but don't actually move data.
+	 */
+
+	switch (op) {
+	case NVMET_FCOP_WRITEDATA:
+		xfrlen = tgt_fcpreq->transfer_length;
+		if (fcpreq) {
+			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+					fcpreq->first_sgl, tgt_fcpreq->offset,
+					xfrlen);
+			fcpreq->transferred_length += xfrlen;
+		}
+		break;
+
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		xfrlen = tgt_fcpreq->transfer_length;
+		if (fcpreq) {
+			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
+					fcpreq->first_sgl, tgt_fcpreq->offset,
+					xfrlen);
+			fcpreq->transferred_length += xfrlen;
+		}
+		if (op == NVMET_FCOP_READDATA)
+			break;
+
+		/* Fall-Thru to RSP handling */
+
+	case NVMET_FCOP_RSP:
+		if (fcpreq) {
+			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+					fcpreq->rsplen : tgt_fcpreq->rsplen);
+			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+			if (rsplen < tgt_fcpreq->rsplen)
+				fcp_err = -E2BIG;
+			fcpreq->rcv_rsplen = rsplen;
+			fcpreq->status = 0;
+		}
+		tfcp_req->status = 0;
+		break;
+
+	default:
+		fcp_err = -EINVAL;
+		break;
+	}
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->active = false;
+	spin_unlock(&tfcp_req->reqlock);
+
+	tgt_fcpreq->transferred_length = xfrlen;
+	tgt_fcpreq->fcp_error = fcp_err;
+	tgt_fcpreq->done(tgt_fcpreq);
+
+	return 0;
+}
+
+static void
+fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+
+	/*
+	 * Mark as aborted only in case there were two threads in the
+	 * transport (one doing io, the other doing the abort); this only
+	 * kills ops posted after the abort request.
+	 */
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->aborted = true;
+	spin_unlock(&tfcp_req->reqlock);
+
+	tfcp_req->status = NVME_SC_INTERNAL;
+
+	/*
+	 * Nothing more to do. If the io wasn't active, the transport
+	 * should immediately call req_release. If it was active, the op
+	 * will complete and the transport will then call req_release.
+	 */
+}
+
+static void
+fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+
+	schedule_work(&tfcp_req->tio_done_work);
+}
+
+static void
+fcloop_ls_abort(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+				struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			void *hw_queue_handle,
+			struct nvmefc_fcp_req *fcpreq)
+{
+	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+	struct fcloop_fcpreq *tfcp_req;
+	bool abortio = true;
+
+	spin_lock(&inireq->inilock);
+	tfcp_req = inireq->tfcp_req;
+	if (tfcp_req)
+		fcloop_tfcp_req_get(tfcp_req);
+	spin_unlock(&inireq->inilock);
+
+	if (!tfcp_req)
+		/* abort has already been called */
+		return;
+
+	/* break initiator/target relationship for io */
+	spin_lock(&tfcp_req->reqlock);
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+	case INI_IO_ACTIVE:
+		tfcp_req->inistate = INI_IO_ABORTED;
+		break;
+	case INI_IO_COMPLETED:
+		abortio = false;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (abortio)
+		/* leave the reference while the work item is scheduled */
+		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+	else  {
+		/*
+		 * The io has already had its done callback made, so there is
+		 * nothing more to do; release the reference taken above.
+		 */
+		fcloop_tfcp_req_put(tfcp_req);
+	}
+}
+
+static void
+fcloop_nport_free(struct kref *ref)
+{
+	struct fcloop_nport *nport =
+		container_of(ref, struct fcloop_nport, ref);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+	list_del(&nport->nport_list);
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+	kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+	return kref_get_unless_zero(&nport->ref);
+}
+
+static void
+fcloop_localport_delete(struct nvme_fc_local_port *localport)
+{
+	struct fcloop_lport_priv *lport_priv = localport->private;
+	struct fcloop_lport *lport = lport_priv->lport;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&lport->unreg_done);
+}
+
+static void
+fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+	struct fcloop_rport *rport = remoteport->private;
+
+	fcloop_nport_put(rport->nport);
+}
+
+static void
+fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+	struct fcloop_tport *tport = targetport->private;
+
+	fcloop_nport_put(tport->nport);
+}
+
+#define	FCLOOP_HW_QUEUES		4
+#define	FCLOOP_SGL_SEGS			256
+#define FCLOOP_DMABOUND_4G		0xFFFFFFFF
+
+static struct nvme_fc_port_template fctemplate = {
+	.localport_delete	= fcloop_localport_delete,
+	.remoteport_delete	= fcloop_remoteport_delete,
+	.create_queue		= fcloop_create_queue,
+	.delete_queue		= fcloop_delete_queue,
+	.ls_req			= fcloop_ls_req,
+	.fcp_io			= fcloop_fcp_req,
+	.ls_abort		= fcloop_ls_abort,
+	.fcp_abort		= fcloop_fcp_abort,
+	.max_hw_queues		= FCLOOP_HW_QUEUES,
+	.max_sgl_segments	= FCLOOP_SGL_SEGS,
+	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
+	.dma_boundary		= FCLOOP_DMABOUND_4G,
+	/* sizes of additional private data for data structures */
+	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
+	.remote_priv_sz		= sizeof(struct fcloop_rport),
+	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
+	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
+};
+
+static struct nvmet_fc_target_template tgttemplate = {
+	.targetport_delete	= fcloop_targetport_delete,
+	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
+	.fcp_op			= fcloop_fcp_op,
+	.fcp_abort		= fcloop_tgt_fcp_abort,
+	.fcp_req_release	= fcloop_fcp_req_release,
+	.max_hw_queues		= FCLOOP_HW_QUEUES,
+	.max_sgl_segments	= FCLOOP_SGL_SEGS,
+	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
+	.dma_boundary		= FCLOOP_DMABOUND_4G,
+	/* optional features */
+	.target_features	= 0,
+	/* sizes of additional private data for data structures */
+	.target_priv_sz		= sizeof(struct fcloop_tport),
+};
+
+static ssize_t
+fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvme_fc_port_info pinfo;
+	struct fcloop_ctrl_options *opts;
+	struct nvme_fc_local_port *localport;
+	struct fcloop_lport *lport;
+	struct fcloop_lport_priv *lport_priv;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+	if (!lport)
+		return -ENOMEM;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		goto out_free_lport;
+
+	ret = fcloop_parse_options(opts, buf);
+	if (ret)
+		goto out_free_opts;
+
+	/* everything there ? */
+	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
+		ret = -EINVAL;
+		goto out_free_opts;
+	}
+
+	memset(&pinfo, 0, sizeof(pinfo));
+	pinfo.node_name = opts->wwnn;
+	pinfo.port_name = opts->wwpn;
+	pinfo.port_role = opts->roles;
+	pinfo.port_id = opts->fcaddr;
+
+	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+	if (!ret) {
+		/* success */
+		lport_priv = localport->private;
+		lport_priv->lport = lport;
+
+		lport->localport = localport;
+		INIT_LIST_HEAD(&lport->lport_list);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+		list_add_tail(&lport->lport_list, &fcloop_lports);
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+	}
+
+out_free_opts:
+	kfree(opts);
+out_free_lport:
+	/* free only if we're going to fail */
+	if (ret)
+		kfree(lport);
+
+	return ret ? ret : count;
+}
+
+
+static void
+__unlink_local_port(struct fcloop_lport *lport)
+{
+	list_del(&lport->lport_list);
+}
+
+static int
+__wait_localport_unreg(struct fcloop_lport *lport)
+{
+	int ret;
+
+	init_completion(&lport->unreg_done);
+
+	ret = nvme_fc_unregister_localport(lport->localport);
+
+	wait_for_completion(&lport->unreg_done);
+
+	kfree(lport);
+
+	return ret;
+}
+
+
+static ssize_t
+fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_lport *tlport, *lport = NULL;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
+		if (tlport->localport->node_name == nodename &&
+		    tlport->localport->port_name == portname) {
+			lport = tlport;
+			__unlink_local_port(lport);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!lport)
+		return -ENOENT;
+
+	ret = __wait_localport_unreg(lport);
+
+	return ret ? ret : count;
+}
+
+static struct fcloop_nport *
+fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
+{
+	struct fcloop_nport *newnport, *nport = NULL;
+	struct fcloop_lport *tmplport, *lport = NULL;
+	struct fcloop_ctrl_options *opts;
+	unsigned long flags;
+	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
+	int ret;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return NULL;
+
+	ret = fcloop_parse_options(opts, buf);
+	if (ret)
+		goto out_free_opts;
+
+	/* everything there ? */
+	if ((opts->mask & opts_mask) != opts_mask) {
+		ret = -EINVAL;
+		goto out_free_opts;
+	}
+
+	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
+	if (!newnport)
+		goto out_free_opts;
+
+	INIT_LIST_HEAD(&newnport->nport_list);
+	newnport->node_name = opts->wwnn;
+	newnport->port_name = opts->wwpn;
+	if (opts->mask & NVMF_OPT_ROLES)
+		newnport->port_role = opts->roles;
+	if (opts->mask & NVMF_OPT_FCADDR)
+		newnport->port_id = opts->fcaddr;
+	kref_init(&newnport->ref);
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
+		if (tmplport->localport->node_name == opts->wwnn &&
+		    tmplport->localport->port_name == opts->wwpn)
+			goto out_invalid_opts;
+
+		if (tmplport->localport->node_name == opts->lpwwnn &&
+		    tmplport->localport->port_name == opts->lpwwpn)
+			lport = tmplport;
+	}
+
+	if (remoteport) {
+		if (!lport)
+			goto out_invalid_opts;
+		newnport->lport = lport;
+	}
+
+	list_for_each_entry(nport, &fcloop_nports, nport_list) {
+		if (nport->node_name == opts->wwnn &&
+		    nport->port_name == opts->wwpn) {
+			if ((remoteport && nport->rport) ||
+			    (!remoteport && nport->tport)) {
+				nport = NULL;
+				goto out_invalid_opts;
+			}
+
+			fcloop_nport_get(nport);
+
+			spin_unlock_irqrestore(&fcloop_lock, flags);
+
+			if (remoteport)
+				nport->lport = lport;
+			if (opts->mask & NVMF_OPT_ROLES)
+				nport->port_role = opts->roles;
+			if (opts->mask & NVMF_OPT_FCADDR)
+				nport->port_id = opts->fcaddr;
+			goto out_free_newnport;
+		}
+	}
+
+	list_add_tail(&newnport->nport_list, &fcloop_nports);
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	kfree(opts);
+	return newnport;
+
+out_invalid_opts:
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+out_free_newnport:
+	kfree(newnport);
+out_free_opts:
+	kfree(opts);
+	return nport;
+}
+
+static ssize_t
+fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvme_fc_remote_port *remoteport;
+	struct fcloop_nport *nport;
+	struct fcloop_rport *rport;
+	struct nvme_fc_port_info pinfo;
+	int ret;
+
+	nport = fcloop_alloc_nport(buf, count, true);
+	if (!nport)
+		return -EIO;
+
+	memset(&pinfo, 0, sizeof(pinfo));
+	pinfo.node_name = nport->node_name;
+	pinfo.port_name = nport->port_name;
+	pinfo.port_role = nport->port_role;
+	pinfo.port_id = nport->port_id;
+
+	ret = nvme_fc_register_remoteport(nport->lport->localport,
+						&pinfo, &remoteport);
+	if (ret || !remoteport) {
+		fcloop_nport_put(nport);
+		return ret;
+	}
+
+	/* success */
+	rport = remoteport->private;
+	rport->remoteport = remoteport;
+	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
+	if (nport->tport) {
+		nport->tport->remoteport = remoteport;
+		nport->tport->lport = nport->lport;
+	}
+	rport->nport = nport;
+	rport->lport = nport->lport;
+	nport->rport = rport;
+
+	return count;
+}
+
+
+static struct fcloop_rport *
+__unlink_remote_port(struct fcloop_nport *nport)
+{
+	struct fcloop_rport *rport = nport->rport;
+
+	if (rport && nport->tport)
+		nport->tport->remoteport = NULL;
+	nport->rport = NULL;
+
+	return rport;
+}
+
+static int
+__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+{
+	if (!rport)
+		return -EALREADY;
+
+	return nvme_fc_unregister_remoteport(rport->remoteport);
+}
+
+static ssize_t
+fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_nport *nport = NULL, *tmpport;
+	struct fcloop_rport *rport = NULL;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+		if (tmpport->node_name == nodename &&
+		    tmpport->port_name == portname && tmpport->rport) {
+			nport = tmpport;
+			rport = __unlink_remote_port(nport);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!nport)
+		return -ENOENT;
+
+	ret = __remoteport_unreg(nport, rport);
+
+	return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvmet_fc_target_port *targetport;
+	struct fcloop_nport *nport;
+	struct fcloop_tport *tport;
+	struct nvmet_fc_port_info tinfo;
+	int ret;
+
+	nport = fcloop_alloc_nport(buf, count, false);
+	if (!nport)
+		return -EIO;
+
+	tinfo.node_name = nport->node_name;
+	tinfo.port_name = nport->port_name;
+	tinfo.port_id = nport->port_id;
+
+	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
+						&targetport);
+	if (ret) {
+		fcloop_nport_put(nport);
+		return ret;
+	}
+
+	/* success */
+	tport = targetport->private;
+	tport->targetport = targetport;
+	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
+	if (nport->rport)
+		nport->rport->targetport = targetport;
+	tport->nport = nport;
+	tport->lport = nport->lport;
+	nport->tport = tport;
+
+	return count;
+}
+
+
+static struct fcloop_tport *
+__unlink_target_port(struct fcloop_nport *nport)
+{
+	struct fcloop_tport *tport = nport->tport;
+
+	if (tport && nport->rport)
+		nport->rport->targetport = NULL;
+	nport->tport = NULL;
+
+	return tport;
+}
+
+static int
+__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+{
+	if (!tport)
+		return -EALREADY;
+
+	return nvmet_fc_unregister_targetport(tport->targetport);
+}
+
+static ssize_t
+fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_nport *nport = NULL, *tmpport;
+	struct fcloop_tport *tport = NULL;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+		if (tmpport->node_name == nodename &&
+		    tmpport->port_name == portname && tmpport->tport) {
+			nport = tmpport;
+			tport = __unlink_target_port(nport);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!nport)
+		return -ENOENT;
+
+	ret = __targetport_unreg(nport, tport);
+
+	return ret ? ret : count;
+}
+
+
+static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
+static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
+static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
+static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
+static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
+static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+
+static struct attribute *fcloop_dev_attrs[] = {
+	&dev_attr_add_local_port.attr,
+	&dev_attr_del_local_port.attr,
+	&dev_attr_add_remote_port.attr,
+	&dev_attr_del_remote_port.attr,
+	&dev_attr_add_target_port.attr,
+	&dev_attr_del_target_port.attr,
+	NULL
+};
+
+static struct attribute_group fcloop_dev_attrs_group = {
+	.attrs		= fcloop_dev_attrs,
+};
+
+static const struct attribute_group *fcloop_dev_attr_groups[] = {
+	&fcloop_dev_attrs_group,
+	NULL,
+};
+
+static struct class *fcloop_class;
+static struct device *fcloop_device;
+
+
+static int __init fcloop_init(void)
+{
+	int ret;
+
+	fcloop_class = class_create(THIS_MODULE, "fcloop");
+	if (IS_ERR(fcloop_class)) {
+		pr_err("couldn't register class fcloop\n");
+		ret = PTR_ERR(fcloop_class);
+		return ret;
+	}
+
+	fcloop_device = device_create_with_groups(
+				fcloop_class, NULL, MKDEV(0, 0), NULL,
+				fcloop_dev_attr_groups, "ctl");
+	if (IS_ERR(fcloop_device)) {
+		pr_err("couldn't create ctl device!\n");
+		ret = PTR_ERR(fcloop_device);
+		goto out_destroy_class;
+	}
+
+	get_device(fcloop_device);
+
+	return 0;
+
+out_destroy_class:
+	class_destroy(fcloop_class);
+	return ret;
+}
+
+static void __exit fcloop_exit(void)
+{
+	struct fcloop_lport *lport;
+	struct fcloop_nport *nport;
+	struct fcloop_tport *tport;
+	struct fcloop_rport *rport;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	for (;;) {
+		nport = list_first_entry_or_null(&fcloop_nports,
+						typeof(*nport), nport_list);
+		if (!nport)
+			break;
+
+		tport = __unlink_target_port(nport);
+		rport = __unlink_remote_port(nport);
+
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+
+		ret = __targetport_unreg(nport, tport);
+		if (ret)
+			pr_warn("%s: Failed deleting target port\n", __func__);
+
+		ret = __remoteport_unreg(nport, rport);
+		if (ret)
+			pr_warn("%s: Failed deleting remote port\n", __func__);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+	}
+
+	for (;;) {
+		lport = list_first_entry_or_null(&fcloop_lports,
+						typeof(*lport), lport_list);
+		if (!lport)
+			break;
+
+		__unlink_local_port(lport);
+
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+
+		ret = __wait_localport_unreg(lport);
+		if (ret)
+			pr_warn("%s: Failed deleting local port\n", __func__);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	put_device(fcloop_device);
+
+	device_destroy(fcloop_class, MKDEV(0, 0));
+	class_destroy(fcloop_class);
+}
+
+module_init(fcloop_init);
+module_exit(fcloop_exit);
+
+MODULE_LICENSE("GPL v2");
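As a usage sketch: fcloop_init() above registers class "fcloop" with a "ctl" device, so the attributes surface as /sys/class/fcloop/ctl/add_local_port and friends, each consuming an option string in the opt_tokens format parsed earlier. A minimal userspace example follows under those assumptions; the WWN values are invented, and remote and target ports are created the same way via add_remote_port (which additionally takes lpwwnn/lpwwpn to point at the local port) and add_target_port.

/* Minimal sketch: create an fcloop local port via sysfs (invented WWNs). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/class/fcloop/ctl/add_local_port";
	const char *opts = "wwnn=0x10000090fa945555,wwpn=0x10000090fa945556";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open add_local_port");
		return 1;
	}
	if (write(fd, opts, strlen(opts)) != (ssize_t)strlen(opts))
		perror("write options");
	close(fd);
	return 0;
}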
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
new file mode 100644
index 0000000..7bc9f62
--- /dev/null
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -0,0 +1,248 @@
+/*
+ * NVMe I/O command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include "nvmet.h"
+
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
+{
+	int ret;
+
+	ns->bdev = blkdev_get_by_path(ns->device_path,
+			FMODE_READ | FMODE_WRITE, NULL);
+	if (IS_ERR(ns->bdev)) {
+		ret = PTR_ERR(ns->bdev);
+		if (ret != -ENOTBLK) {
+			pr_err("failed to open block device %s: (%ld)\n",
+					ns->device_path, PTR_ERR(ns->bdev));
+		}
+		ns->bdev = NULL;
+		return ret;
+	}
+	ns->size = i_size_read(ns->bdev->bd_inode);
+	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+	return 0;
+}
+
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+	if (ns->bdev) {
+		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+		ns->bdev = NULL;
+	}
+}
+
+static void nvmet_bio_done(struct bio *bio)
+{
+	struct nvmet_req *req = bio->bi_private;
+
+	nvmet_req_complete(req,
+		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+
+	if (bio != &req->b.inline_bio)
+		bio_put(bio);
+}
+
+static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+{
+	int sg_cnt = req->sg_cnt;
+	struct bio *bio = &req->b.inline_bio;
+	struct scatterlist *sg;
+	sector_t sector;
+	blk_qc_t cookie;
+	int op, op_flags = 0, i;
+
+	if (!req->sg_cnt) {
+		nvmet_req_complete(req, 0);
+		return;
+	}
+
+	if (req->cmd->rw.opcode == nvme_cmd_write) {
+		op = REQ_OP_WRITE;
+		op_flags = REQ_SYNC | REQ_IDLE;
+		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+			op_flags |= REQ_FUA;
+	} else {
+		op = REQ_OP_READ;
+	}
+
+	sector = le64_to_cpu(req->cmd->rw.slba);
+	sector <<= (req->ns->blksize_shift - 9);
+
+	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	bio_set_dev(bio, req->ns->bdev);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_private = req;
+	bio->bi_end_io = nvmet_bio_done;
+	bio_set_op_attrs(bio, op, op_flags);
+
+	for_each_sg(req->sg, sg, req->sg_cnt, i) {
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+			struct bio *prev = bio;
+
+			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+			bio_set_dev(bio, req->ns->bdev);
+			bio->bi_iter.bi_sector = sector;
+			bio_set_op_attrs(bio, op, op_flags);
+
+			bio_chain(bio, prev);
+			submit_bio(prev);
+		}
+
+		sector += sg->length >> 9;
+		sg_cnt--;
+	}
+
+	cookie = submit_bio(bio);
+
+	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+}
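Note on the (req->ns->blksize_shift - 9) conversions used here and in the discard and write-zeroes paths: namespace LBAs are rescaled into the block layer's 512-byte sectors. As a worked example, for a namespace formatted with 4096-byte blocks (blksize_shift = 12), slba = 16 becomes sector = 16 << (12 - 9) = 128, which is the same byte offset either way: 16 * 4096 = 128 * 512 = 65536.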
+
+static void nvmet_bdev_execute_flush(struct nvmet_req *req)
+{
+	struct bio *bio = &req->b.inline_bio;
+
+	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	bio_set_dev(bio, req->ns->bdev);
+	bio->bi_private = req;
+	bio->bi_end_io = nvmet_bio_done;
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+	submit_bio(bio);
+}
+
+u16 nvmet_bdev_flush(struct nvmet_req *req)
+{
+	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	return 0;
+}
+
+static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
+		struct nvme_dsm_range *range, struct bio **bio)
+{
+	int ret;
+
+	ret = __blkdev_issue_discard(ns->bdev,
+			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+			GFP_KERNEL, 0, bio);
+	if (ret && ret != -EOPNOTSUPP)
+		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	return 0;
+}
+
+static void nvmet_bdev_execute_discard(struct nvmet_req *req)
+{
+	struct nvme_dsm_range range;
+	struct bio *bio = NULL;
+	int i;
+	u16 status;
+
+	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+				sizeof(range));
+		if (status)
+			break;
+
+		status = nvmet_bdev_discard_range(req->ns, &range, &bio);
+		if (status)
+			break;
+	}
+
+	if (bio) {
+		bio->bi_private = req;
+		bio->bi_end_io = nvmet_bio_done;
+		if (status) {
+			bio->bi_status = BLK_STS_IOERR;
+			bio_endio(bio);
+		} else {
+			submit_bio(bio);
+		}
+	} else {
+		nvmet_req_complete(req, status);
+	}
+}
+
+static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
+{
+	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+	case NVME_DSMGMT_AD:
+		nvmet_bdev_execute_discard(req);
+		return;
+	case NVME_DSMGMT_IDR:
+	case NVME_DSMGMT_IDW:
+	default:
+		/* Not supported yet */
+		nvmet_req_complete(req, 0);
+		return;
+	}
+}
+
+static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
+{
+	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+	struct bio *bio = NULL;
+	u16 status = NVME_SC_SUCCESS;
+	sector_t sector;
+	sector_t nr_sector;
+
+	sector = le64_to_cpu(write_zeroes->slba) <<
+		(req->ns->blksize_shift - 9);
+	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+		(req->ns->blksize_shift - 9));
+
+	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+				GFP_KERNEL, &bio, 0))
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+
+	if (bio) {
+		bio->bi_private = req;
+		bio->bi_end_io = nvmet_bio_done;
+		submit_bio(bio);
+	} else {
+		nvmet_req_complete(req, status);
+	}
+}
+
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+
+	switch (cmd->common.opcode) {
+	case nvme_cmd_read:
+	case nvme_cmd_write:
+		req->execute = nvmet_bdev_execute_rw;
+		req->data_len = nvmet_rw_len(req);
+		return 0;
+	case nvme_cmd_flush:
+		req->execute = nvmet_bdev_execute_flush;
+		req->data_len = 0;
+		return 0;
+	case nvme_cmd_dsm:
+		req->execute = nvmet_bdev_execute_dsm;
+		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
+			sizeof(struct nvme_dsm_range);
+		return 0;
+	case nvme_cmd_write_zeroes:
+		req->execute = nvmet_bdev_execute_write_zeroes;
+		return 0;
+	default:
+		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+		       req->sq->qid);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
new file mode 100644
index 0000000..81a9dc5
--- /dev/null
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target File I/O commands implementation.
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/uio.h>
+#include <linux/falloc.h>
+#include <linux/file.h>
+#include "nvmet.h"
+
+#define NVMET_MAX_MPOOL_BVEC		16
+#define NVMET_MIN_MPOOL_OBJ		16
+
+void nvmet_file_ns_disable(struct nvmet_ns *ns)
+{
+	if (ns->file) {
+		if (ns->buffered_io)
+			flush_workqueue(buffered_io_wq);
+		mempool_destroy(ns->bvec_pool);
+		ns->bvec_pool = NULL;
+		kmem_cache_destroy(ns->bvec_cache);
+		ns->bvec_cache = NULL;
+		fput(ns->file);
+		ns->file = NULL;
+	}
+}
+
+int nvmet_file_ns_enable(struct nvmet_ns *ns)
+{
+	int flags = O_RDWR | O_LARGEFILE;
+	struct kstat stat;
+	int ret;
+
+	if (!ns->buffered_io)
+		flags |= O_DIRECT;
+
+	ns->file = filp_open(ns->device_path, flags, 0);
+	if (IS_ERR(ns->file)) {
+		pr_err("failed to open file %s: (%ld)\n",
+				ns->device_path, PTR_ERR(ns->file));
+		return PTR_ERR(ns->file);
+	}
+
+	ret = vfs_getattr(&ns->file->f_path,
+			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+	if (ret)
+		goto err;
+
+	ns->size = stat.size;
+	ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+
+	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
+			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
+			0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!ns->bvec_cache) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
+			mempool_free_slab, ns->bvec_cache);
+
+	if (!ns->bvec_pool) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return ret;
+err:
+	ns->size = 0;
+	ns->blksize_shift = 0;
+	nvmet_file_ns_disable(ns);
+	return ret;
+}
+
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+{
+	bv->bv_page = sg_page_iter_page(iter);
+	bv->bv_offset = iter->sg->offset;
+	bv->bv_len = PAGE_SIZE - iter->sg->offset;
+}
+
+static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+		unsigned long nr_segs, size_t count)
+{
+	struct kiocb *iocb = &req->f.iocb;
+	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
+	struct iov_iter iter;
+	int ki_flags = 0, rw;
+	ssize_t ret;
+
+	if (req->cmd->rw.opcode == nvme_cmd_write) {
+		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+			ki_flags = IOCB_DSYNC;
+		call_iter = req->ns->file->f_op->write_iter;
+		rw = WRITE;
+	} else {
+		call_iter = req->ns->file->f_op->read_iter;
+		rw = READ;
+	}
+
+	iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+
+	iocb->ki_pos = pos;
+	iocb->ki_filp = req->ns->file;
+	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
+
+	ret = call_iter(iocb, &iter);
+
+	if (ret != -EIOCBQUEUED && iocb->ki_complete)
+		iocb->ki_complete(iocb, ret, 0);
+
+	return ret;
+}
+
+static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+{
+	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+
+	if (req->f.bvec != req->inline_bvec) {
+		if (likely(req->f.mpool_alloc == false))
+			kfree(req->f.bvec);
+		else
+			mempool_free(req->f.bvec, req->ns->bvec_pool);
+	}
+
+	nvmet_req_complete(req, ret != req->data_len ?
+			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+	struct sg_page_iter sg_pg_iter;
+	unsigned long bv_cnt = 0;
+	bool is_sync = false;
+	size_t len = 0, total_len = 0;
+	ssize_t ret = 0;
+	loff_t pos;
+
+	if (!req->sg_cnt || !nr_bvec) {
+		nvmet_req_complete(req, 0);
+		return;
+	}
+
+	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+	if (unlikely(pos + req->data_len > req->ns->size)) {
+		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+		return;
+	}
+
+	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
+		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
+				GFP_KERNEL);
+	else
+		req->f.bvec = req->inline_bvec;
+
+	req->f.mpool_alloc = false;
+	if (unlikely(!req->f.bvec)) {
+		/* fallback under memory pressure */
+		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
+		req->f.mpool_alloc = true;
+		if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
+			is_sync = true;
+	}
+
+	memset(&req->f.iocb, 0, sizeof(struct kiocb));
+	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
+		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+		len += req->f.bvec[bv_cnt].bv_len;
+		total_len += req->f.bvec[bv_cnt].bv_len;
+		bv_cnt++;
+
+		WARN_ON_ONCE((nr_bvec - 1) < 0);
+
+		if (unlikely(is_sync) &&
+		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
+			if (ret < 0)
+				goto out;
+			pos += len;
+			bv_cnt = 0;
+			len = 0;
+		}
+		nr_bvec--;
+	}
+
+	if (WARN_ON_ONCE(total_len != req->data_len))
+		ret = -EIO;
+out:
+	if (unlikely(is_sync || ret)) {
+		nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
+		return;
+	}
+	req->f.iocb.ki_complete = nvmet_file_io_done;
+	nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
+}
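Note on the mempool fallback above: when kmalloc_array() fails, the request borrows a single mempool bvec array of NVMET_MAX_MPOOL_BVEC (16) entries, so any transfer needing more bio_vecs must reuse that array and is therefore submitted synchronously in chunks of at most 16. As a worked example, assuming 4 KiB pages and page-aligned scatterlist entries, a 256 KiB request maps to 64 bio_vecs and would be issued as four synchronous 64 KiB submissions.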
+
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+	nvmet_file_execute_rw(req);
+}
+
+static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+	queue_work(buffered_io_wq, &req->f.work);
+}
+
+u16 nvmet_file_flush(struct nvmet_req *req)
+{
+	if (vfs_fsync(req->ns->file, 1) < 0)
+		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	return 0;
+}
+
+static void nvmet_file_flush_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+	nvmet_req_complete(req, nvmet_file_flush(req));
+}
+
+static void nvmet_file_execute_flush(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_flush_work);
+	schedule_work(&req->f.work);
+}
+
+static void nvmet_file_execute_discard(struct nvmet_req *req)
+{
+	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+	struct nvme_dsm_range range;
+	loff_t offset, len;
+	u16 ret;
+	int i;
+
+	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+		ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+					sizeof(range));
+		if (ret)
+			break;
+
+		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
+		len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+		if (offset + len > req->ns->size) {
+			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+			break;
+		}
+
+		if (vfs_fallocate(req->ns->file, mode, offset, len)) {
+			ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+			break;
+		}
+	}
+
+	nvmet_req_complete(req, ret);
+}
+
+static void nvmet_file_dsm_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+	case NVME_DSMGMT_AD:
+		nvmet_file_execute_discard(req);
+		return;
+	case NVME_DSMGMT_IDR:
+	case NVME_DSMGMT_IDW:
+	default:
+		/* Not supported yet */
+		nvmet_req_complete(req, 0);
+		return;
+	}
+}
+
+static void nvmet_file_execute_dsm(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
+	schedule_work(&req->f.work);
+}
+
+static void nvmet_file_write_zeroes_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
+	loff_t offset;
+	loff_t len;
+	int ret;
+
+	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
+	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+			req->ns->blksize_shift);
+
+	if (unlikely(offset + len > req->ns->size)) {
+		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+		return;
+	}
+
+	ret = vfs_fallocate(req->ns->file, mode, offset, len);
+	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+}
+
+static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
+	schedule_work(&req->f.work);
+}
+
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
+{
+	struct nvme_command *cmd = req->cmd;
+
+	switch (cmd->common.opcode) {
+	case nvme_cmd_read:
+	case nvme_cmd_write:
+		if (req->ns->buffered_io)
+			req->execute = nvmet_file_execute_rw_buffered_io;
+		else
+			req->execute = nvmet_file_execute_rw;
+		req->data_len = nvmet_rw_len(req);
+		return 0;
+	case nvme_cmd_flush:
+		req->execute = nvmet_file_execute_flush;
+		req->data_len = 0;
+		return 0;
+	case nvme_cmd_dsm:
+		req->execute = nvmet_file_execute_dsm;
+		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
+			sizeof(struct nvme_dsm_range);
+		return 0;
+	case nvme_cmd_write_zeroes:
+		req->execute = nvmet_file_execute_write_zeroes;
+		req->data_len = 0;
+		return 0;
+	default:
+		pr_err("unhandled cmd for file ns %d on qid %d\n",
+				cmd->common.opcode, req->sq->qid);
+		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	}
+}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
new file mode 100644
index 0000000..9908082
--- /dev/null
+++ b/drivers/nvme/target/loop.c
@@ -0,0 +1,733 @@
+/*
+ * NVMe over Fabrics loopback device.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
+#include <linux/nvme.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include "nvmet.h"
+#include "../host/nvme.h"
+#include "../host/fabrics.h"
+
+#define NVME_LOOP_MAX_SEGMENTS		256
+
+struct nvme_loop_iod {
+	struct nvme_request	nvme_req;
+	struct nvme_command	cmd;
+	struct nvme_completion	rsp;
+	struct nvmet_req	req;
+	struct nvme_loop_queue	*queue;
+	struct work_struct	work;
+	struct sg_table		sg_table;
+	struct scatterlist	first_sgl[];
+};
+
+struct nvme_loop_ctrl {
+	struct nvme_loop_queue	*queues;
+
+	struct blk_mq_tag_set	admin_tag_set;
+
+	struct list_head	list;
+	struct blk_mq_tag_set	tag_set;
+	struct nvme_loop_iod	async_event_iod;
+	struct nvme_ctrl	ctrl;
+
+	struct nvmet_ctrl	*target_ctrl;
+	struct nvmet_port	*port;
+};
+
+static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
+{
+	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
+}
+
+enum nvme_loop_queue_flags {
+	NVME_LOOP_Q_LIVE	= 0,
+};
+
+struct nvme_loop_queue {
+	struct nvmet_cq		nvme_cq;
+	struct nvmet_sq		nvme_sq;
+	struct nvme_loop_ctrl	*ctrl;
+	unsigned long		flags;
+};
+
+static LIST_HEAD(nvme_loop_ports);
+static DEFINE_MUTEX(nvme_loop_ports_mutex);
+
+static LIST_HEAD(nvme_loop_ctrl_list);
+static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
+
+static const struct nvmet_fabrics_ops nvme_loop_ops;
+
+static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
+{
+	return queue - queue->ctrl->queues;
+}
+
+static void nvme_loop_complete_rq(struct request *req)
+{
+	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+
+	nvme_cleanup_cmd(req);
+	sg_free_table_chained(&iod->sg_table, true);
+	nvme_complete_rq(req);
+}
+
+static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
+{
+	u32 queue_idx = nvme_loop_queue_idx(queue);
+
+	if (queue_idx == 0)
+		return queue->ctrl->admin_tag_set.tags[queue_idx];
+	return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static void nvme_loop_queue_response(struct nvmet_req *req)
+{
+	struct nvme_loop_queue *queue =
+		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
+	struct nvme_completion *cqe = req->rsp;
+
+	/*
+	 * AEN requests are special as they don't time out and can
+	 * survive any kind of queue freeze and often don't respond to
+	 * aborts.  We don't even bother to allocate a struct request
+	 * for them but rather special case them here.
+	 */
+	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
+			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
+	} else {
+		struct request *rq;
+
+		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+		if (!rq) {
+			dev_err(queue->ctrl->ctrl.device,
+				"tag 0x%x on queue %d not found\n",
+				cqe->command_id, nvme_loop_queue_idx(queue));
+			return;
+		}
+
+		nvme_end_request(rq, cqe->status, cqe->result);
+	}
+}
+
+static void nvme_loop_execute_work(struct work_struct *work)
+{
+	struct nvme_loop_iod *iod =
+		container_of(work, struct nvme_loop_iod, work);
+
+	nvmet_req_execute(&iod->req);
+}
+
+static enum blk_eh_timer_return
+nvme_loop_timeout(struct request *rq, bool reserved)
+{
+	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
+
+	/* queue error recovery */
+	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
+
+	/* fail with DNR on admin cmd timeout */
+	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+	return BLK_EH_DONE;
+}
+
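+/*
+ * Main I/O path of the loopback transport: translate the block layer
+ * request into an NVMe command with nvme_setup_cmd(), initialize the
+ * embedded nvmet_req against this queue's target-side SQ/CQ, map the
+ * request segments into a chained scatterlist and hand the command to
+ * the target core from workqueue context (nvme_loop_execute_work).
+ */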
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+		const struct blk_mq_queue_data *bd)
+{
+	struct nvme_ns *ns = hctx->queue->queuedata;
+	struct nvme_loop_queue *queue = hctx->driver_data;
+	struct request *req = bd->rq;
+	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
+	blk_status_t ret;
+
+	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
+
+	ret = nvme_setup_cmd(ns, req, &iod->cmd);
+	if (ret)
+		return ret;
+
+	blk_mq_start_request(req);
+	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+	iod->req.port = queue->ctrl->port;
+	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
+			&queue->nvme_sq, &nvme_loop_ops))
+		return BLK_STS_OK;
+
+	if (blk_rq_nr_phys_segments(req)) {
+		iod->sg_table.sgl = iod->first_sgl;
+		if (sg_alloc_table_chained(&iod->sg_table,
+				blk_rq_nr_phys_segments(req),
+				iod->sg_table.sgl))
+			return BLK_STS_RESOURCE;
+
+		iod->req.sg = iod->sg_table.sgl;
+		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+		iod->req.transfer_len = blk_rq_payload_bytes(req);
+	}
+
+	schedule_work(&iod->work);
+	return BLK_STS_OK;
+}
+
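+/*
+ * AEN commands are built by hand and use command id NVME_AQ_BLK_MQ_DEPTH;
+ * nvme_loop_queue_response() treats any admin completion with a command
+ * id >= NVME_AQ_BLK_MQ_DEPTH as an async event rather than a request.
+ */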
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
+{
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
+	struct nvme_loop_queue *queue = &ctrl->queues[0];
+	struct nvme_loop_iod *iod = &ctrl->async_event_iod;
+
+	memset(&iod->cmd, 0, sizeof(iod->cmd));
+	iod->cmd.common.opcode = nvme_admin_async_event;
+	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
+			&nvme_loop_ops)) {
+		dev_err(ctrl->ctrl.device, "failed async event work\n");
+		return;
+	}
+
+	schedule_work(&iod->work);
+}
+
+static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+		struct nvme_loop_iod *iod, unsigned int queue_idx)
+{
+	iod->req.cmd = &iod->cmd;
+	iod->req.rsp = &iod->rsp;
+	iod->queue = &ctrl->queues[queue_idx];
+	INIT_WORK(&iod->work, nvme_loop_execute_work);
+	return 0;
+}
+
+static int nvme_loop_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
+{
+	struct nvme_loop_ctrl *ctrl = set->driver_data;
+
+	nvme_req(req)->ctrl = &ctrl->ctrl;
+	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
+			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
+}
+
+static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+		unsigned int hctx_idx)
+{
+	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
+
+	hctx->driver_data = queue;
+	return 0;
+}
+
+static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+		unsigned int hctx_idx)
+{
+	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_queue *queue = &ctrl->queues[0];
+
+	BUG_ON(hctx_idx != 0);
+
+	hctx->driver_data = queue;
+	return 0;
+}
+
+static const struct blk_mq_ops nvme_loop_mq_ops = {
+	.queue_rq	= nvme_loop_queue_rq,
+	.complete	= nvme_loop_complete_rq,
+	.init_request	= nvme_loop_init_request,
+	.init_hctx	= nvme_loop_init_hctx,
+	.timeout	= nvme_loop_timeout,
+};
+
+static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
+	.queue_rq	= nvme_loop_queue_rq,
+	.complete	= nvme_loop_complete_rq,
+	.init_request	= nvme_loop_init_request,
+	.init_hctx	= nvme_loop_init_admin_hctx,
+	.timeout	= nvme_loop_timeout,
+};
+
+static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+	blk_cleanup_queue(ctrl->ctrl.admin_q);
+	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+}
+
+static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+	if (list_empty(&ctrl->list))
+		goto free_ctrl;
+
+	mutex_lock(&nvme_loop_ctrl_mutex);
+	list_del(&ctrl->list);
+	mutex_unlock(&nvme_loop_ctrl_mutex);
+
+	if (nctrl->tagset) {
+		blk_cleanup_queue(ctrl->ctrl.connect_q);
+		blk_mq_free_tag_set(&ctrl->tag_set);
+	}
+	kfree(ctrl->queues);
+	nvmf_free_options(nctrl->opts);
+free_ctrl:
+	kfree(ctrl);
+}
+
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	}
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->ctrl.queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
+static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i, ret;
+
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+		if (ret)
+			return ret;
+		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+	}
+
+	return 0;
+}
+
+static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+	int error;
+
+	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
+	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+	ctrl->admin_tag_set.driver_data = ctrl;
+	ctrl->admin_tag_set.nr_hw_queues = 1;
+	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
+
+	ctrl->queues[0].ctrl = ctrl;
+	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
+	if (error)
+		return error;
+	ctrl->ctrl.queue_count = 1;
+
+	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	if (error)
+		goto out_free_sq;
+	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
+
+	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+	if (IS_ERR(ctrl->ctrl.admin_q)) {
+		error = PTR_ERR(ctrl->ctrl.admin_q);
+		goto out_free_tagset;
+	}
+
+	error = nvmf_connect_admin_queue(&ctrl->ctrl);
+	if (error)
+		goto out_cleanup_queue;
+
+	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
+	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
+	if (error) {
+		dev_err(ctrl->ctrl.device,
+			"prop_get NVME_REG_CAP failed\n");
+		goto out_cleanup_queue;
+	}
+
+	ctrl->ctrl.sqsize =
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
+
+	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+	if (error)
+		goto out_cleanup_queue;
+
+	ctrl->ctrl.max_hw_sectors =
+		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+
+	error = nvme_init_identify(&ctrl->ctrl);
+	if (error)
+		goto out_cleanup_queue;
+
+	return 0;
+
+out_cleanup_queue:
+	blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_sq:
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+	return error;
+}
+
+static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+	if (ctrl->ctrl.queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		blk_mq_tagset_busy_iter(&ctrl->tag_set,
+					nvme_cancel_request, &ctrl->ctrl);
+		nvme_loop_destroy_io_queues(ctrl);
+	}
+
+	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+		nvme_shutdown_ctrl(&ctrl->ctrl);
+
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+				nvme_cancel_request, &ctrl->ctrl);
+	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+	nvme_loop_destroy_admin_queue(ctrl);
+}
+
+static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
+{
+	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
+}
+
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
+{
+	struct nvme_loop_ctrl *ctrl;
+
+	mutex_lock(&nvme_loop_ctrl_mutex);
+	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
+		if (ctrl->ctrl.cntlid == nctrl->cntlid)
+			nvme_delete_ctrl(&ctrl->ctrl);
+	}
+	mutex_unlock(&nvme_loop_ctrl_mutex);
+}
+
+static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+{
+	struct nvme_loop_ctrl *ctrl =
+		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
+	bool changed;
+	int ret;
+
+	nvme_stop_ctrl(&ctrl->ctrl);
+	nvme_loop_shutdown_ctrl(ctrl);
+
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	ret = nvme_loop_configure_admin_queue(ctrl);
+	if (ret)
+		goto out_disable;
+
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
+
+	ret = nvme_loop_connect_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_io;
+
+	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+			ctrl->ctrl.queue_count - 1);
+
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+	WARN_ON_ONCE(!changed);
+
+	nvme_start_ctrl(&ctrl->ctrl);
+
+	return;
+
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
+	nvme_loop_destroy_admin_queue(ctrl);
+out_disable:
+	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+	nvme_uninit_ctrl(&ctrl->ctrl);
+	nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
+	.name			= "loop",
+	.module			= THIS_MODULE,
+	.flags			= NVME_F_FABRICS,
+	.reg_read32		= nvmf_reg_read32,
+	.reg_read64		= nvmf_reg_read64,
+	.reg_write32		= nvmf_reg_write32,
+	.free_ctrl		= nvme_loop_free_ctrl,
+	.submit_async_event	= nvme_loop_submit_async_event,
+	.delete_ctrl		= nvme_loop_delete_ctrl_host,
+	.get_address		= nvmf_get_address,
+};
+
+static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int ret;
+
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		return ret;
+
+	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+	ctrl->tag_set.ops = &nvme_loop_mq_ops;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+	ctrl->tag_set.numa_node = NUMA_NO_NODE;
+	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
+	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+	ctrl->ctrl.tagset = &ctrl->tag_set;
+
+	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	if (ret)
+		goto out_destroy_queues;
+
+	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+	if (IS_ERR(ctrl->ctrl.connect_q)) {
+		ret = PTR_ERR(ctrl->ctrl.connect_q);
+		goto out_free_tagset;
+	}
+
+	ret = nvme_loop_connect_io_queues(ctrl);
+	if (ret)
+		goto out_cleanup_connect_q;
+
+	return 0;
+
+out_cleanup_connect_q:
+	blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tagset:
+	blk_mq_free_tag_set(&ctrl->tag_set);
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
+static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
+{
+	struct nvmet_port *p, *found = NULL;
+
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_for_each_entry(p, &nvme_loop_ports, entry) {
+		/* if no transport address is specified use the first port */
+		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
+		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
+			continue;
+		found = p;
+		break;
+	}
+	mutex_unlock(&nvme_loop_ports_mutex);
+	return found;
+}
+
+static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+		struct nvmf_ctrl_options *opts)
+{
+	struct nvme_loop_ctrl *ctrl;
+	bool changed;
+	int ret;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return ERR_PTR(-ENOMEM);
+	ctrl->ctrl.opts = opts;
+	INIT_LIST_HEAD(&ctrl->list);
+
+	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
+
+	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
+				0 /* no quirks, we're perfect! */);
+	if (ret)
+		goto out_put_ctrl;
+
+	ret = -ENOMEM;
+
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
+	ctrl->ctrl.kato = opts->kato;
+	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
+
+	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
+			GFP_KERNEL);
+	if (!ctrl->queues)
+		goto out_uninit_ctrl;
+
+	ret = nvme_loop_configure_admin_queue(ctrl);
+	if (ret)
+		goto out_free_queues;
+
+	if (opts->queue_size > ctrl->ctrl.maxcmd) {
+		/* warn if maxcmd is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.maxcmd);
+		opts->queue_size = ctrl->ctrl.maxcmd;
+	}
+
+	if (opts->nr_io_queues) {
+		ret = nvme_loop_create_io_queues(ctrl);
+		if (ret)
+			goto out_remove_admin_queue;
+	}
+
+	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
+
+	dev_info(ctrl->ctrl.device,
+		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
+
+	nvme_get_ctrl(&ctrl->ctrl);
+
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+	WARN_ON_ONCE(!changed);
+
+	mutex_lock(&nvme_loop_ctrl_mutex);
+	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
+	mutex_unlock(&nvme_loop_ctrl_mutex);
+
+	nvme_start_ctrl(&ctrl->ctrl);
+
+	return &ctrl->ctrl;
+
+out_remove_admin_queue:
+	nvme_loop_destroy_admin_queue(ctrl);
+out_free_queues:
+	kfree(ctrl->queues);
+out_uninit_ctrl:
+	nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
+	nvme_put_ctrl(&ctrl->ctrl);
+	if (ret > 0)
+		ret = -EIO;
+	return ERR_PTR(ret);
+}
+
+static int nvme_loop_add_port(struct nvmet_port *port)
+{
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_add_tail(&port->entry, &nvme_loop_ports);
+	mutex_unlock(&nvme_loop_ports_mutex);
+	return 0;
+}
+
+static void nvme_loop_remove_port(struct nvmet_port *port)
+{
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_del_init(&port->entry);
+	mutex_unlock(&nvme_loop_ports_mutex);
+}
+
+static const struct nvmet_fabrics_ops nvme_loop_ops = {
+	.owner		= THIS_MODULE,
+	.type		= NVMF_TRTYPE_LOOP,
+	.add_port	= nvme_loop_add_port,
+	.remove_port	= nvme_loop_remove_port,
+	.queue_response = nvme_loop_queue_response,
+	.delete_ctrl	= nvme_loop_delete_ctrl,
+};
+
+static struct nvmf_transport_ops nvme_loop_transport = {
+	.name		= "loop",
+	.module		= THIS_MODULE,
+	.create_ctrl	= nvme_loop_create_ctrl,
+	.allowed_opts	= NVMF_OPT_TRADDR,
+};
+
+static int __init nvme_loop_init_module(void)
+{
+	int ret;
+
+	ret = nvmet_register_transport(&nvme_loop_ops);
+	if (ret)
+		return ret;
+
+	ret = nvmf_register_transport(&nvme_loop_transport);
+	if (ret)
+		nvmet_unregister_transport(&nvme_loop_ops);
+
+	return ret;
+}
+
+static void __exit nvme_loop_cleanup_module(void)
+{
+	struct nvme_loop_ctrl *ctrl, *next;
+
+	nvmf_unregister_transport(&nvme_loop_transport);
+	nvmet_unregister_transport(&nvme_loop_ops);
+
+	mutex_lock(&nvme_loop_ctrl_mutex);
+	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
+		nvme_delete_ctrl(&ctrl->ctrl);
+	mutex_unlock(&nvme_loop_ctrl_mutex);
+
+	flush_workqueue(nvme_delete_wq);
+}
+
+module_init(nvme_loop_init_module);
+module_exit(nvme_loop_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
new file mode 100644
index 0000000..ec9af4e
--- /dev/null
+++ b/drivers/nvme/target/nvmet.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVMET_H
+#define _NVMET_H
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/percpu-refcount.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uuid.h>
+#include <linux/nvme.h>
+#include <linux/configfs.h>
+#include <linux/rcupdate.h>
+#include <linux/blkdev.h>
+
+#define NVMET_ASYNC_EVENTS		4
+#define NVMET_ERROR_LOG_SLOTS		128
+
+/*
+ * Supported optional AENs:
+ */
+#define NVMET_AEN_CFG_OPTIONAL \
+	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
+
+/*
+ * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
+ */
+#define NVMET_AEN_CFG_ALL \
+	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
+	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
+	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
+
+/*
+ * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
+ * The 16-bit shift sets the IATTR bit to 1, which indicates that the
+ * offending offset is located in the data section of the connect command.
+ */
+#define IPO_IATTR_CONNECT_DATA(x)	\
+	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
+#define IPO_IATTR_CONNECT_SQE(x)	\
+	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
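+/*
+ * For example, IPO_IATTR_CONNECT_DATA(hostnqn) encodes IATTR = 1 and
+ * IPO = offsetof(struct nvmf_connect_data, hostnqn), i.e. "the hostnqn
+ * field of the connect data was invalid", while IPO_IATTR_CONNECT_SQE(qid)
+ * points at the qid field of the connect SQE itself.
+ */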
+
+struct nvmet_ns {
+	struct list_head	dev_link;
+	struct percpu_ref	ref;
+	struct block_device	*bdev;
+	struct file		*file;
+	bool			readonly;
+	u32			nsid;
+	u32			blksize_shift;
+	loff_t			size;
+	u8			nguid[16];
+	uuid_t			uuid;
+	u32			anagrpid;
+
+	bool			buffered_io;
+	bool			enabled;
+	struct nvmet_subsys	*subsys;
+	const char		*device_path;
+
+	struct config_group	device_group;
+	struct config_group	group;
+
+	struct completion	disable_done;
+	mempool_t		*bvec_pool;
+	struct kmem_cache	*bvec_cache;
+};
+
+static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_ns, group);
+}
+
+struct nvmet_cq {
+	u16			qid;
+	u16			size;
+};
+
+struct nvmet_sq {
+	struct nvmet_ctrl	*ctrl;
+	struct percpu_ref	ref;
+	u16			qid;
+	u16			size;
+	u32			sqhd;
+	struct completion	free_done;
+	struct completion	confirm_done;
+};
+
+struct nvmet_ana_group {
+	struct config_group	group;
+	struct nvmet_port	*port;
+	u32			grpid;
+};
+
+static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_ana_group,
+			group);
+}
+
+/**
+ * struct nvmet_port -	Common structure to keep port
+ *				information for the target.
+ * @entry:		Entry into referrals or transport list.
+ * @disc_addr:		Address information is stored in a format defined
+ *				for a discovery log page entry.
+ * @group:		ConfigFS group for this element's folder.
+ * @priv:		Private data for the transport.
+ */
+struct nvmet_port {
+	struct list_head		entry;
+	struct nvmf_disc_rsp_page_entry	disc_addr;
+	struct config_group		group;
+	struct config_group		subsys_group;
+	struct list_head		subsystems;
+	struct config_group		referrals_group;
+	struct list_head		referrals;
+	struct config_group		ana_groups_group;
+	struct nvmet_ana_group		ana_default_group;
+	enum nvme_ana_state		*ana_state;
+	void				*priv;
+	bool				enabled;
+	int				inline_data_size;
+};
+
+static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_port,
+			group);
+}
+
+static inline struct nvmet_port *ana_groups_to_port(
+		struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_port,
+			ana_groups_group);
+}
+
+struct nvmet_ctrl {
+	struct nvmet_subsys	*subsys;
+	struct nvmet_cq		**cqs;
+	struct nvmet_sq		**sqs;
+
+	struct mutex		lock;
+	u64			cap;
+	u32			cc;
+	u32			csts;
+
+	uuid_t			hostid;
+	u16			cntlid;
+	u32			kato;
+
+	struct nvmet_port	*port;
+
+	u32			aen_enabled;
+	unsigned long		aen_masked;
+	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
+	unsigned int		nr_async_event_cmds;
+	struct list_head	async_events;
+	struct work_struct	async_event_work;
+
+	struct list_head	subsys_entry;
+	struct kref		ref;
+	struct delayed_work	ka_work;
+	struct work_struct	fatal_err_work;
+
+	const struct nvmet_fabrics_ops *ops;
+
+	__le32			*changed_ns_list;
+	u32			nr_changed_ns;
+
+	char			subsysnqn[NVMF_NQN_FIELD_LEN];
+	char			hostnqn[NVMF_NQN_FIELD_LEN];
+};
+
+struct nvmet_subsys {
+	enum nvme_subsys_type	type;
+
+	struct mutex		lock;
+	struct kref		ref;
+
+	struct list_head	namespaces;
+	unsigned int		nr_namespaces;
+	unsigned int		max_nsid;
+
+	struct list_head	ctrls;
+
+	struct list_head	hosts;
+	bool			allow_any_host;
+
+	u16			max_qid;
+
+	u64			ver;
+	u64			serial;
+	char			*subsysnqn;
+
+	struct config_group	group;
+
+	struct config_group	namespaces_group;
+	struct config_group	allowed_hosts_group;
+};
+
+static inline struct nvmet_subsys *to_subsys(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_subsys, group);
+}
+
+static inline struct nvmet_subsys *namespaces_to_subsys(
+		struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_subsys,
+			namespaces_group);
+}
+
+struct nvmet_host {
+	struct config_group	group;
+};
+
+static inline struct nvmet_host *to_host(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct nvmet_host, group);
+}
+
+static inline char *nvmet_host_name(struct nvmet_host *host)
+{
+	return config_item_name(&host->group.cg_item);
+}
+
+struct nvmet_host_link {
+	struct list_head	entry;
+	struct nvmet_host	*host;
+};
+
+struct nvmet_subsys_link {
+	struct list_head	entry;
+	struct nvmet_subsys	*subsys;
+};
+
+struct nvmet_req;
+struct nvmet_fabrics_ops {
+	struct module *owner;
+	unsigned int type;
+	unsigned int msdbd;
+	bool has_keyed_sgls : 1;
+	void (*queue_response)(struct nvmet_req *req);
+	int (*add_port)(struct nvmet_port *port);
+	void (*remove_port)(struct nvmet_port *port);
+	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
+	void (*disc_traddr)(struct nvmet_req *req,
+			struct nvmet_port *port, char *traddr);
+};
+
+#define NVMET_MAX_INLINE_BIOVEC	8
+
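+/*
+ * Common per-command state shared by all nvmet transports.  The union
+ * carries backend-specific state: b.inline_bio for the block-device
+ * backend, and the kiocb/bvec bookkeeping in f for the file backend
+ * (see nvmet_file_parse_io_cmd()).
+ */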
+struct nvmet_req {
+	struct nvme_command	*cmd;
+	struct nvme_completion	*rsp;
+	struct nvmet_sq		*sq;
+	struct nvmet_cq		*cq;
+	struct nvmet_ns		*ns;
+	struct scatterlist	*sg;
+	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
+	union {
+		struct {
+			struct bio      inline_bio;
+		} b;
+		struct {
+			bool			mpool_alloc;
+			struct kiocb            iocb;
+			struct bio_vec          *bvec;
+			struct work_struct      work;
+		} f;
+	};
+	int			sg_cnt;
+	/* data length as parsed from the command: */
+	size_t			data_len;
+	/* data length as parsed from the SGL descriptor: */
+	size_t			transfer_len;
+
+	struct nvmet_port	*port;
+
+	void (*execute)(struct nvmet_req *req);
+	const struct nvmet_fabrics_ops *ops;
+};
+
+extern struct workqueue_struct *buffered_io_wq;
+
+static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
+{
+	req->rsp->status = cpu_to_le16(status << 1);
+}
+
+static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
+{
+	req->rsp->result.u32 = cpu_to_le32(result);
+}
+
+/*
+ * NVMe command writes actually are DMA reads for us on the target side.
+ */
+static inline enum dma_data_direction
+nvmet_data_dir(struct nvmet_req *req)
+{
+	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+struct nvmet_async_event {
+	struct list_head	entry;
+	u8			event_type;
+	u8			event_info;
+	u8			log_page;
+};
+
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
+void nvmet_req_uninit(struct nvmet_req *req);
+void nvmet_req_execute(struct nvmet_req *req);
+void nvmet_req_complete(struct nvmet_req *req, u16 status);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+		u16 size);
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+		u16 size);
+void nvmet_sq_destroy(struct nvmet_sq *sq);
+int nvmet_sq_init(struct nvmet_sq *sq);
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+		struct nvmet_req *req, struct nvmet_ctrl **ret);
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
+u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+		enum nvme_subsys_type type);
+void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+void nvmet_put_namespace(struct nvmet_ns *ns);
+int nvmet_ns_enable(struct nvmet_ns *ns);
+void nvmet_ns_disable(struct nvmet_ns *ns);
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_ns_free(struct nvmet_ns *ns);
+
+void nvmet_send_ana_event(struct nvmet_subsys *subsys,
+		struct nvmet_port *port);
+void nvmet_port_send_ana_event(struct nvmet_port *port);
+
+int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
+void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+
+int nvmet_enable_port(struct nvmet_port *port);
+void nvmet_disable_port(struct nvmet_port *port);
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *port);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+		size_t len);
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
+		size_t len);
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+
+#define NVMET_QUEUE_SIZE	1024
+#define NVMET_NR_QUEUES		128
+#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
+
+/*
+ * Nice round number that makes a list of nsids fit into a page.
+ * Should become tunable at some point in the future.
+ */
+#define NVMET_MAX_NAMESPACES	1024
+
+/*
+ * 0 is not a valid ANA group ID, so we start numbering at 1.
+ *
+ * ANA Group 1 exists without manual intervention, has namespaces assigned to it
+ * by default, and is available in an optimized state through all ports.
+ */
+#define NVMET_MAX_ANAGRPS	128
+#define NVMET_DEFAULT_ANA_GRPID	1
+
+#define NVMET_KAS		10
+#define NVMET_DISC_KATO		120
+
+int __init nvmet_init_configfs(void);
+void __exit nvmet_exit_configfs(void);
+
+int __init nvmet_init_discovery(void);
+void nvmet_exit_discovery(void);
+
+extern struct nvmet_subsys *nvmet_disc_subsys;
+extern u64 nvmet_genctr;
+extern struct rw_semaphore nvmet_config_sem;
+
+extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
+extern u64 nvmet_ana_chgcnt;
+extern struct rw_semaphore nvmet_ana_sem;
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+		const char *hostnqn);
+
+int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
+int nvmet_file_ns_enable(struct nvmet_ns *ns);
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
+void nvmet_file_ns_disable(struct nvmet_ns *ns);
+u16 nvmet_bdev_flush(struct nvmet_req *req);
+u16 nvmet_file_flush(struct nvmet_req *req);
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
+
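+/*
+ * NVMe read/write commands carry a 0's based block count, so the
+ * transfer length is (rw.length + 1) logical blocks.  For example,
+ * rw.length == 7 with a 512-byte block size (blksize_shift == 9)
+ * yields 8 << 9 == 4096 bytes.
+ */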
+static inline u32 nvmet_rw_len(struct nvmet_req *req)
+{
+	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
+			req->ns->blksize_shift;
+}
+#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
new file mode 100644
index 0000000..e57f390
--- /dev/null
+++ b/drivers/nvme/target/rdma.c
@@ -0,0 +1,1672 @@
+/*
+ * NVMe over Fabrics RDMA target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/inet.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+
+#include <linux/nvme-rdma.h>
+#include "nvmet.h"
+
+/*
+ * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
+ */
+#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
+#define NVMET_RDMA_MAX_INLINE_SGE		4
+#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)
+
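+/*
+ * Each nvmet_rdma_cmd holds the RECV-side resources for one command
+ * slot: one SGE for the NVMe command capsule itself plus up to
+ * NVMET_RDMA_MAX_INLINE_SGE SGEs backing in-capsule (inline) data.
+ * The nvmet_rdma_rsp below holds the SEND-side completion and the
+ * rdma_rw context used for RDMA READ/WRITE data transfers.
+ */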
+struct nvmet_rdma_cmd {
+	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
+	struct ib_cqe		cqe;
+	struct ib_recv_wr	wr;
+	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
+	struct nvme_command     *nvme_cmd;
+	struct nvmet_rdma_queue	*queue;
+};
+
+enum {
+	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
+	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
+};
+
+struct nvmet_rdma_rsp {
+	struct ib_sge		send_sge;
+	struct ib_cqe		send_cqe;
+	struct ib_send_wr	send_wr;
+
+	struct nvmet_rdma_cmd	*cmd;
+	struct nvmet_rdma_queue	*queue;
+
+	struct ib_cqe		read_cqe;
+	struct rdma_rw_ctx	rw;
+
+	struct nvmet_req	req;
+
+	bool			allocated;
+	u8			n_rdma;
+	u32			flags;
+	u32			invalidate_rkey;
+
+	struct list_head	wait_list;
+	struct list_head	free_list;
+};
+
+enum nvmet_rdma_queue_state {
+	NVMET_RDMA_Q_CONNECTING,
+	NVMET_RDMA_Q_LIVE,
+	NVMET_RDMA_Q_DISCONNECTING,
+};
+
+struct nvmet_rdma_queue {
+	struct rdma_cm_id	*cm_id;
+	struct nvmet_port	*port;
+	struct ib_cq		*cq;
+	atomic_t		sq_wr_avail;
+	struct nvmet_rdma_device *dev;
+	spinlock_t		state_lock;
+	enum nvmet_rdma_queue_state state;
+	struct nvmet_cq		nvme_cq;
+	struct nvmet_sq		nvme_sq;
+
+	struct nvmet_rdma_rsp	*rsps;
+	struct list_head	free_rsps;
+	spinlock_t		rsps_lock;
+	struct nvmet_rdma_cmd	*cmds;
+
+	struct work_struct	release_work;
+	struct list_head	rsp_wait_list;
+	struct list_head	rsp_wr_wait_list;
+	spinlock_t		rsp_wr_wait_lock;
+
+	int			idx;
+	int			host_qid;
+	int			recv_queue_size;
+	int			send_queue_size;
+
+	struct list_head	queue_list;
+};
+
+struct nvmet_rdma_device {
+	struct ib_device	*device;
+	struct ib_pd		*pd;
+	struct ib_srq		*srq;
+	struct nvmet_rdma_cmd	*srq_cmds;
+	size_t			srq_size;
+	struct kref		ref;
+	struct list_head	entry;
+	int			inline_data_size;
+	int			inline_page_count;
+};
+
+static bool nvmet_rdma_use_srq;
+module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
+MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+
+static DEFINE_IDA(nvmet_rdma_queue_ida);
+static LIST_HEAD(nvmet_rdma_queue_list);
+static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+
+static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+
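+/*
+ * Number of pages needed to hold len bytes, i.e. DIV_ROUND_UP(len,
+ * PAGE_SIZE) for len >= 1.  With 4K pages: len == 1 -> 1,
+ * len == 4096 -> 1, len == 4097 -> 2, len == 16384 -> 4.
+ */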
+static int num_pages(int len)
+{
+	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+}
+
+/* XXX: really should move to a generic header sooner or later.. */
+static inline u32 get_unaligned_le24(const u8 *p)
+{
+	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
+}
+
+static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
+{
+	return nvme_is_write(rsp->req.cmd) &&
+		rsp->req.transfer_len &&
+		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
+{
+	return !nvme_is_write(rsp->req.cmd) &&
+		rsp->req.transfer_len &&
+		!rsp->req.rsp->status &&
+		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
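+/*
+ * Responses normally come from the pool preallocated in
+ * nvmet_rdma_alloc_rsps() (two per RECV slot).  If the pool runs dry
+ * under heavy load, fall back to kmalloc() and mark the response as
+ * ->allocated so nvmet_rdma_put_rsp() frees it instead of returning
+ * it to the free list.
+ */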
+static inline struct nvmet_rdma_rsp *
+nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
+{
+	struct nvmet_rdma_rsp *rsp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->rsps_lock, flags);
+	rsp = list_first_entry_or_null(&queue->free_rsps,
+				struct nvmet_rdma_rsp, free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
+	spin_unlock_irqrestore(&queue->rsps_lock, flags);
+
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
+	return rsp;
+}
+
+static inline void
+nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
+{
+	unsigned long flags;
+
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
+	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
+	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
+	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+}
+
+static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_cmd *c)
+{
+	struct scatterlist *sg;
+	struct ib_sge *sge;
+	int i;
+
+	if (!ndev->inline_data_size)
+		return;
+
+	sg = c->inline_sg;
+	sge = &c->sge[1];
+
+	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+		if (sge->length)
+			ib_dma_unmap_page(ndev->device, sge->addr,
+					sge->length, DMA_FROM_DEVICE);
+		if (sg_page(sg))
+			__free_page(sg_page(sg));
+	}
+}
+
+static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_cmd *c)
+{
+	struct scatterlist *sg;
+	struct ib_sge *sge;
+	struct page *pg;
+	int len;
+	int i;
+
+	if (!ndev->inline_data_size)
+		return 0;
+
+	sg = c->inline_sg;
+	sg_init_table(sg, ndev->inline_page_count);
+	sge = &c->sge[1];
+	len = ndev->inline_data_size;
+
+	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
+		pg = alloc_page(GFP_KERNEL);
+		if (!pg)
+			goto out_err;
+		sg_assign_page(sg, pg);
+		sge->addr = ib_dma_map_page(ndev->device,
+			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ndev->device, sge->addr))
+			goto out_err;
+		sge->length = min_t(int, len, PAGE_SIZE);
+		sge->lkey = ndev->pd->local_dma_lkey;
+		len -= sge->length;
+	}
+
+	return 0;
+out_err:
+	for (; i >= 0; i--, sg--, sge--) {
+		if (sge->length)
+			ib_dma_unmap_page(ndev->device, sge->addr,
+					sge->length, DMA_FROM_DEVICE);
+		if (sg_page(sg))
+			__free_page(sg_page(sg));
+	}
+	return -ENOMEM;
+}
+
+static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
+			struct nvmet_rdma_cmd *c, bool admin)
+{
+	/* NVMe command / RDMA RECV */
+	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
+	if (!c->nvme_cmd)
+		goto out;
+
+	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
+			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
+		goto out_free_cmd;
+
+	c->sge[0].length = sizeof(*c->nvme_cmd);
+	c->sge[0].lkey = ndev->pd->local_dma_lkey;
+
+	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
+		goto out_unmap_cmd;
+
+	c->cqe.done = nvmet_rdma_recv_done;
+
+	c->wr.wr_cqe = &c->cqe;
+	c->wr.sg_list = c->sge;
+	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
+
+	return 0;
+
+out_unmap_cmd:
+	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+out_free_cmd:
+	kfree(c->nvme_cmd);
+
+out:
+	return -ENOMEM;
+}
+
+static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_cmd *c, bool admin)
+{
+	if (!admin)
+		nvmet_rdma_free_inline_pages(ndev, c);
+	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+	kfree(c->nvme_cmd);
+}
+
+static struct nvmet_rdma_cmd *
+nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
+		int nr_cmds, bool admin)
+{
+	struct nvmet_rdma_cmd *cmds;
+	int ret = -EINVAL, i;
+
+	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+	if (!cmds)
+		goto out;
+
+	for (i = 0; i < nr_cmds; i++) {
+		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
+		if (ret)
+			goto out_free;
+	}
+
+	return cmds;
+
+out_free:
+	while (--i >= 0)
+		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+	kfree(cmds);
+out:
+	return ERR_PTR(ret);
+}
+
+static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
+{
+	int i;
+
+	for (i = 0; i < nr_cmds; i++)
+		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+	kfree(cmds);
+}
+
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r)
+{
+	/* NVMe CQE / RDMA SEND */
+	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
+	if (!r->req.rsp)
+		goto out;
+
+	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
+			sizeof(*r->req.rsp), DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
+		goto out_free_rsp;
+
+	r->send_sge.length = sizeof(*r->req.rsp);
+	r->send_sge.lkey = ndev->pd->local_dma_lkey;
+
+	r->send_cqe.done = nvmet_rdma_send_done;
+
+	r->send_wr.wr_cqe = &r->send_cqe;
+	r->send_wr.sg_list = &r->send_sge;
+	r->send_wr.num_sge = 1;
+	r->send_wr.send_flags = IB_SEND_SIGNALED;
+
+	/* Data In / RDMA READ */
+	r->read_cqe.done = nvmet_rdma_read_data_done;
+	return 0;
+
+out_free_rsp:
+	kfree(r->req.rsp);
+out:
+	return -ENOMEM;
+}
+
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r)
+{
+	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
+				sizeof(*r->req.rsp), DMA_TO_DEVICE);
+	kfree(r->req.rsp);
+}
+
+static int
+nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+{
+	struct nvmet_rdma_device *ndev = queue->dev;
+	int nr_rsps = queue->recv_queue_size * 2;
+	int ret = -EINVAL, i;
+
+	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+			GFP_KERNEL);
+	if (!queue->rsps)
+		goto out;
+
+	for (i = 0; i < nr_rsps; i++) {
+		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+		if (ret)
+			goto out_free;
+
+		list_add_tail(&rsp->free_list, &queue->free_rsps);
+	}
+
+	return 0;
+
+out_free:
+	while (--i >= 0) {
+		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+		list_del(&rsp->free_list);
+		nvmet_rdma_free_rsp(ndev, rsp);
+	}
+	kfree(queue->rsps);
+out:
+	return ret;
+}
+
+static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+{
+	struct nvmet_rdma_device *ndev = queue->dev;
+	int i, nr_rsps = queue->recv_queue_size * 2;
+
+	for (i = 0; i < nr_rsps; i++) {
+		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+		list_del(&rsp->free_list);
+		nvmet_rdma_free_rsp(ndev, rsp);
+	}
+	kfree(queue->rsps);
+}
+
+static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_cmd *cmd)
+{
+	int ret;
+
+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
+	if (ndev->srq)
+		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+	else
+		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+
+	if (unlikely(ret))
+		pr_err("post_recv cmd failed\n");
+
+	return ret;
+}
+
+static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
+{
+	spin_lock(&queue->rsp_wr_wait_lock);
+	while (!list_empty(&queue->rsp_wr_wait_list)) {
+		struct nvmet_rdma_rsp *rsp;
+		bool ret;
+
+		rsp = list_entry(queue->rsp_wr_wait_list.next,
+				struct nvmet_rdma_rsp, wait_list);
+		list_del(&rsp->wait_list);
+
+		spin_unlock(&queue->rsp_wr_wait_lock);
+		ret = nvmet_rdma_execute_command(rsp);
+		spin_lock(&queue->rsp_wr_wait_lock);
+
+		if (!ret) {
+			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
+			break;
+		}
+	}
+	spin_unlock(&queue->rsp_wr_wait_lock);
+}
+
+
+static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+{
+	struct nvmet_rdma_queue *queue = rsp->queue;
+
+	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+
+	if (rsp->n_rdma) {
+		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+				queue->cm_id->port_num, rsp->req.sg,
+				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+	}
+
+	if (rsp->req.sg != rsp->cmd->inline_sg)
+		sgl_free(rsp->req.sg);
+
+	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
+		nvmet_rdma_process_wr_wait_list(queue);
+
+	nvmet_rdma_put_rsp(rsp);
+}
+
+static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
+{
+	if (queue->nvme_sq.ctrl) {
+		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+	} else {
+		/*
+		 * No controller has been set up yet (e.g. the admin
+		 * connect failed), so just disconnect and clean up
+		 * the queue.
+		 */
+		nvmet_rdma_queue_disconnect(queue);
+	}
+}
+
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct nvmet_rdma_rsp *rsp =
+		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
+
+	nvmet_rdma_release_rsp(rsp);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS &&
+		     wc->status != IB_WC_WR_FLUSH_ERR)) {
+		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+		nvmet_rdma_error_comp(queue);
+	}
+}
+
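+/*
+ * Completion path for a target request: use SEND_WITH_INV when the
+ * host requested remote invalidation of its rkey, chain the RDMA WRITE
+ * work requests that carry read data in front of the response SEND,
+ * and repost the command RECV buffer before posting the response.
+ */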
+static void nvmet_rdma_queue_response(struct nvmet_req *req)
+{
+	struct nvmet_rdma_rsp *rsp =
+		container_of(req, struct nvmet_rdma_rsp, req);
+	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+	struct ib_send_wr *first_wr;
+
+	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
+		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
+	} else {
+		rsp->send_wr.opcode = IB_WR_SEND;
+	}
+
+	if (nvmet_rdma_need_data_out(rsp))
+		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+				cm_id->port_num, NULL, &rsp->send_wr);
+	else
+		first_wr = &rsp->send_wr;
+
+	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
+	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
+		pr_err("sending cmd response failed\n");
+		nvmet_rdma_release_rsp(rsp);
+	}
+}
+
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct nvmet_rdma_rsp *rsp =
+		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
+
+	WARN_ON(rsp->n_rdma <= 0);
+	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+			queue->cm_id->port_num, rsp->req.sg,
+			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+	rsp->n_rdma = 0;
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		nvmet_req_uninit(&rsp->req);
+		nvmet_rdma_release_rsp(rsp);
+		if (wc->status != IB_WC_WR_FLUSH_ERR) {
+			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
+				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+			nvmet_rdma_error_comp(queue);
+		}
+		return;
+	}
+
+	nvmet_req_execute(&rsp->req);
+}
+
+static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
+		u64 off)
+{
+	int sg_count = num_pages(len);
+	struct scatterlist *sg;
+	int i;
+
+	sg = rsp->cmd->inline_sg;
+	for (i = 0; i < sg_count; i++, sg++) {
+		if (i < sg_count - 1)
+			sg_unmark_end(sg);
+		else
+			sg_mark_end(sg);
+		sg->offset = off;
+		sg->length = min_t(int, len, PAGE_SIZE - off);
+		len -= sg->length;
+		if (!i)
+			off = 0;
+	}
+
+	rsp->req.sg = rsp->cmd->inline_sg;
+	rsp->req.sg_cnt = sg_count;
+}
+
+static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
+{
+	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
+	u64 off = le64_to_cpu(sgl->addr);
+	u32 len = le32_to_cpu(sgl->length);
+
+	if (!nvme_is_write(rsp->req.cmd))
+		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+
+	if (off + len > rsp->queue->dev->inline_data_size) {
+		pr_err("invalid inline data offset!\n");
+		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+	}
+
+	/* no data command? */
+	if (!len)
+		return 0;
+
+	nvmet_rdma_use_inline_sg(rsp, len, off);
+	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+	rsp->req.transfer_len += len;
+	return 0;
+}
+
+static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
+		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
+{
+	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+	u64 addr = le64_to_cpu(sgl->addr);
+	u32 len = get_unaligned_le24(sgl->length);
+	u32 key = get_unaligned_le32(sgl->key);
+	int ret;
+
+	/* no data command? */
+	if (!len)
+		return 0;
+
+	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+	if (!rsp->req.sg)
+		return NVME_SC_INTERNAL;
+
+	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
+			nvmet_data_dir(&rsp->req));
+	if (ret < 0)
+		return NVME_SC_INTERNAL;
+	rsp->req.transfer_len += len;
+	rsp->n_rdma += ret;
+
+	if (invalidate) {
+		rsp->invalidate_rkey = key;
+		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
+	}
+
+	return 0;
+}
+
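+/*
+ * Dispatch on the SGL descriptor in the command capsule: a data block
+ * descriptor with the offset subtype refers to in-capsule (inline)
+ * data that arrived together with the command, while keyed SGL
+ * descriptors describe host memory and set up an rdma_rw context for
+ * RDMA READ/WRITE, optionally recording the rkey for SEND_WITH_INV.
+ */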
+static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
+{
+	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
+
+	switch (sgl->type >> 4) {
+	case NVME_SGL_FMT_DATA_DESC:
+		switch (sgl->type & 0xf) {
+		case NVME_SGL_FMT_OFFSET:
+			return nvmet_rdma_map_sgl_inline(rsp);
+		default:
+			pr_err("invalid SGL subtype: %#x\n", sgl->type);
+			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		}
+	case NVME_KEY_SGL_FMT_DATA_DESC:
+		switch (sgl->type & 0xf) {
+		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
+			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
+		case NVME_SGL_FMT_ADDRESS:
+			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
+		default:
+			pr_err("invalid SGL subtype: %#x\n", sgl->type);
+			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		}
+	default:
+		pr_err("invalid SGL type: %#x\n", sgl->type);
+		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+	}
+}
+
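+/*
+ * Reserve send queue slots before executing: one work request for the
+ * response SEND plus n_rdma for the RDMA transfer.  If sq_wr_avail
+ * would go negative, back off and return false so that the caller can
+ * park the request on rsp_wr_wait_list until completions free up
+ * slots (see nvmet_rdma_process_wr_wait_list()).
+ */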
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+{
+	struct nvmet_rdma_queue *queue = rsp->queue;
+
+	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
+			&queue->sq_wr_avail) < 0)) {
+		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
+				1 + rsp->n_rdma, queue->idx,
+				queue->nvme_sq.ctrl->cntlid);
+		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+		return false;
+	}
+
+	if (nvmet_rdma_need_data_in(rsp)) {
+		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+				queue->cm_id->port_num, &rsp->read_cqe, NULL))
+			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+	} else {
+		nvmet_req_execute(&rsp->req);
+	}
+
+	return true;
+}
+
+static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
+		struct nvmet_rdma_rsp *cmd)
+{
+	u16 status;
+
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->send_sge.addr, cmd->send_sge.length,
+		DMA_TO_DEVICE);
+
+	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
+			&queue->nvme_sq, &nvmet_rdma_ops))
+		return;
+
+	status = nvmet_rdma_map_sgl(cmd);
+	if (status)
+		goto out_err;
+
+	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
+		spin_lock(&queue->rsp_wr_wait_lock);
+		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
+		spin_unlock(&queue->rsp_wr_wait_lock);
+	}
+
+	return;
+
+out_err:
+	nvmet_req_complete(&cmd->req, status);
+}
+
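+/*
+ * RECV completion handler: bail out on failed completions (anything
+ * other than a flush error is treated as a fatal controller error),
+ * treat capsules shorter than a 64-byte NVMe command as fatal, then
+ * pick up a response from the queue's pool and either handle the
+ * command or, while the queue is still NVMET_RDMA_Q_CONNECTING,
+ * defer it to rsp_wait_list.
+ */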
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct nvmet_rdma_cmd *cmd =
+		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_rsp *rsp;
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		if (wc->status != IB_WC_WR_FLUSH_ERR) {
+			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
+				wc->wr_cqe, ib_wc_status_msg(wc->status),
+				wc->status);
+			nvmet_rdma_error_comp(queue);
+		}
+		return;
+	}
+
+	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
+		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
+		nvmet_rdma_error_comp(queue);
+		return;
+	}
+
+	cmd->queue = queue;
+	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * We only get here under memory pressure: without a rsp
+		 * we cannot even fail the command, so silently drop it
+		 * and let the host retry.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
+	rsp->queue = queue;
+	rsp->cmd = cmd;
+	rsp->flags = 0;
+	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
+
+	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&queue->state_lock, flags);
+		if (queue->state == NVMET_RDMA_Q_CONNECTING)
+			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+		else
+			nvmet_rdma_put_rsp(rsp);
+		spin_unlock_irqrestore(&queue->state_lock, flags);
+		return;
+	}
+
+	nvmet_rdma_handle_command(queue, rsp);
+}
+
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+{
+	if (!ndev->srq)
+		return;
+
+	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+	ib_destroy_srq(ndev->srq);
+}
+
+static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+{
+	struct ib_srq_init_attr srq_attr = { NULL, };
+	struct ib_srq *srq;
+	size_t srq_size;
+	int ret, i;
+
+	srq_size = 4095;	/* XXX: tune */
+
+	srq_attr.attr.max_wr = srq_size;
+	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
+	srq_attr.attr.srq_limit = 0;
+	srq_attr.srq_type = IB_SRQT_BASIC;
+	srq = ib_create_srq(ndev->pd, &srq_attr);
+	if (IS_ERR(srq)) {
+		/*
+		 * If SRQs aren't supported we just go ahead and use normal
+		 * non-shared receive queues.
+		 */
+		pr_info("SRQ requested but not supported.\n");
+		return 0;
+	}
+
+	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+	if (IS_ERR(ndev->srq_cmds)) {
+		ret = PTR_ERR(ndev->srq_cmds);
+		goto out_destroy_srq;
+	}
+
+	ndev->srq = srq;
+	ndev->srq_size = srq_size;
+
+	for (i = 0; i < srq_size; i++) {
+		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+		if (ret)
+			goto out_free_cmds;
+	}
+
+	return 0;
+
+out_free_cmds:
+	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+out_destroy_srq:
+	ib_destroy_srq(srq);
+	return ret;
+}
+
+static void nvmet_rdma_free_dev(struct kref *ref)
+{
+	struct nvmet_rdma_device *ndev =
+		container_of(ref, struct nvmet_rdma_device, ref);
+
+	mutex_lock(&device_list_mutex);
+	list_del(&ndev->entry);
+	mutex_unlock(&device_list_mutex);
+
+	nvmet_rdma_destroy_srq(ndev);
+	ib_dealloc_pd(ndev->pd);
+
+	kfree(ndev);
+}
+
+static struct nvmet_rdma_device *
+nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+	struct nvmet_port *port = cm_id->context;
+	struct nvmet_rdma_device *ndev;
+	int inline_page_count;
+	int inline_sge_count;
+	int ret;
+
+	mutex_lock(&device_list_mutex);
+	list_for_each_entry(ndev, &device_list, entry) {
+		if (ndev->device->node_guid == cm_id->device->node_guid &&
+		    kref_get_unless_zero(&ndev->ref))
+			goto out_unlock;
+	}
+
+	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+	if (!ndev)
+		goto out_err;
+
+	inline_page_count = num_pages(port->inline_data_size);
+	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
+				cm_id->device->attrs.max_recv_sge) - 1;
+	if (inline_page_count > inline_sge_count) {
+		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
+			port->inline_data_size, cm_id->device->name,
+			inline_sge_count * PAGE_SIZE);
+		port->inline_data_size = inline_sge_count * PAGE_SIZE;
+		inline_page_count = inline_sge_count;
+	}
+	ndev->inline_data_size = port->inline_data_size;
+	ndev->inline_page_count = inline_page_count;
+	ndev->device = cm_id->device;
+	kref_init(&ndev->ref);
+
+	ndev->pd = ib_alloc_pd(ndev->device, 0);
+	if (IS_ERR(ndev->pd))
+		goto out_free_dev;
+
+	if (nvmet_rdma_use_srq) {
+		ret = nvmet_rdma_init_srq(ndev);
+		if (ret)
+			goto out_free_pd;
+	}
+
+	list_add(&ndev->entry, &device_list);
+out_unlock:
+	mutex_unlock(&device_list_mutex);
+	pr_debug("added %s.\n", ndev->device->name);
+	return ndev;
+
+out_free_pd:
+	ib_dealloc_pd(ndev->pd);
+out_free_dev:
+	kfree(ndev);
+out_err:
+	mutex_unlock(&device_list_mutex);
+	return NULL;
+}
+
+static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+{
+	struct ib_qp_init_attr qp_attr;
+	struct nvmet_rdma_device *ndev = queue->dev;
+	int comp_vector, nr_cqe, ret, i;
+
+	/*
+	 * Spread the io queues across completion vectors,
+	 * but still keep all admin queues on vector 0.
+	 */
+	comp_vector = !queue->host_qid ? 0 :
+		queue->idx % ndev->device->num_comp_vectors;
+
+	/*
+	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
+	 */
+	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
+
+	queue->cq = ib_alloc_cq(ndev->device, queue,
+			nr_cqe + 1, comp_vector,
+			IB_POLL_WORKQUEUE);
+	if (IS_ERR(queue->cq)) {
+		ret = PTR_ERR(queue->cq);
+		pr_err("failed to create CQ cqe= %d ret= %d\n",
+		       nr_cqe + 1, ret);
+		goto out;
+	}
+
+	memset(&qp_attr, 0, sizeof(qp_attr));
+	qp_attr.qp_context = queue;
+	qp_attr.event_handler = nvmet_rdma_qp_event;
+	qp_attr.send_cq = queue->cq;
+	qp_attr.recv_cq = queue->cq;
+	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+	qp_attr.qp_type = IB_QPT_RC;
+	/* +1 for drain */
+	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
+	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
+					ndev->device->attrs.max_send_sge);
+
+	if (ndev->srq) {
+		qp_attr.srq = ndev->srq;
+	} else {
+		/* +1 for drain */
+		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
+		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
+	}
+
+	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
+	if (ret) {
+		pr_err("failed to create_qp ret= %d\n", ret);
+		goto err_destroy_cq;
+	}
+
+	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+
+	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
+		 qp_attr.cap.max_send_wr, queue->cm_id);
+
+	if (!ndev->srq) {
+		for (i = 0; i < queue->recv_queue_size; i++) {
+			queue->cmds[i].queue = queue;
+			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+			if (ret)
+				goto err_destroy_qp;
+		}
+	}
+
+out:
+	return ret;
+
+err_destroy_qp:
+	rdma_destroy_qp(queue->cm_id);
+err_destroy_cq:
+	ib_free_cq(queue->cq);
+	goto out;
+}
+
+static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+{
+	struct ib_qp *qp = queue->cm_id->qp;
+
+	ib_drain_qp(qp);
+	rdma_destroy_id(queue->cm_id);
+	ib_destroy_qp(qp);
+	ib_free_cq(queue->cq);
+}
+
+static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
+{
+	pr_debug("freeing queue %d\n", queue->idx);
+
+	nvmet_sq_destroy(&queue->nvme_sq);
+
+	nvmet_rdma_destroy_queue_ib(queue);
+	if (!queue->dev->srq) {
+		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+				queue->recv_queue_size,
+				!queue->host_qid);
+	}
+	nvmet_rdma_free_rsps(queue);
+	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+	kfree(queue);
+}
+
+static void nvmet_rdma_release_queue_work(struct work_struct *w)
+{
+	struct nvmet_rdma_queue *queue =
+		container_of(w, struct nvmet_rdma_queue, release_work);
+	struct nvmet_rdma_device *dev = queue->dev;
+
+	nvmet_rdma_free_queue(queue);
+
+	kref_put(&dev->ref, nvmet_rdma_free_dev);
+}
+
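+/*
+ * Validate the NVMe/RDMA CM request private data (record format 1.0) and
+ * derive the queue sizes from it.  Returns an NVMe/RDMA CM status code on
+ * failure, which the caller uses to reject the connection.
+ */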
+static int
+nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
+				struct nvmet_rdma_queue *queue)
+{
+	struct nvme_rdma_cm_req *req;
+
+	req = (struct nvme_rdma_cm_req *)conn->private_data;
+	if (!req || conn->private_data_len == 0)
+		return NVME_RDMA_CM_INVALID_LEN;
+
+	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
+		return NVME_RDMA_CM_INVALID_RECFMT;
+
+	queue->host_qid = le16_to_cpu(req->qid);
+
+	/*
+	 * Our recv queue size is req->hsqsize + 1;
+	 * our send queue size is req->hrqsize.
+	 */
+	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
+	queue->send_queue_size = le16_to_cpu(req->hrqsize);
+
+	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
+		return NVME_RDMA_CM_INVALID_HSQSIZE;
+
+	/* XXX: Should we enforce some kind of max for IO queues? */
+
+	return 0;
+}
+
+static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
+				enum nvme_rdma_cm_status status)
+{
+	struct nvme_rdma_cm_rej rej;
+
+	pr_debug("rejecting connect request: status %d (%s)\n",
+		 status, nvme_rdma_cm_msg(status));
+
+	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+	rej.sts = cpu_to_le16(status);
+
+	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+}
+
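+/*
+ * Build a queue for an incoming connect request: parse the CM private data,
+ * then set up the nvmet SQ, response contexts, (non-SRQ) command buffers and
+ * the RDMA CQ/QP.  On any failure the connection is rejected with an
+ * NVMe/RDMA CM status code and NULL is returned.
+ */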
+static struct nvmet_rdma_queue *
+nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
+		struct rdma_cm_id *cm_id,
+		struct rdma_cm_event *event)
+{
+	struct nvmet_rdma_queue *queue;
+	int ret;
+
+	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	if (!queue) {
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_reject;
+	}
+
+	ret = nvmet_sq_init(&queue->nvme_sq);
+	if (ret) {
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_free_queue;
+	}
+
+	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
+	if (ret)
+		goto out_destroy_sq;
+
+	/*
+	 * Schedule the actual release from a work item, because calling
+	 * rdma_destroy_id from inside a CM callback would trigger a
+	 * deadlock. (great API design..)
+	 */
+	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
+	queue->dev = ndev;
+	queue->cm_id = cm_id;
+
+	spin_lock_init(&queue->state_lock);
+	queue->state = NVMET_RDMA_Q_CONNECTING;
+	INIT_LIST_HEAD(&queue->rsp_wait_list);
+	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
+	spin_lock_init(&queue->rsp_wr_wait_lock);
+	INIT_LIST_HEAD(&queue->free_rsps);
+	spin_lock_init(&queue->rsps_lock);
+	INIT_LIST_HEAD(&queue->queue_list);
+
+	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+	if (queue->idx < 0) {
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_destroy_sq;
+	}
+
+	ret = nvmet_rdma_alloc_rsps(queue);
+	if (ret) {
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_ida_remove;
+	}
+
+	if (!ndev->srq) {
+		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
+				queue->recv_queue_size,
+				!queue->host_qid);
+		if (IS_ERR(queue->cmds)) {
+			ret = NVME_RDMA_CM_NO_RSC;
+			goto out_free_responses;
+		}
+	}
+
+	ret = nvmet_rdma_create_queue_ib(queue);
+	if (ret) {
+		pr_err("%s: creating RDMA queue failed (%d).\n",
+			__func__, ret);
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_free_cmds;
+	}
+
+	return queue;
+
+out_free_cmds:
+	if (!ndev->srq) {
+		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+				queue->recv_queue_size,
+				!queue->host_qid);
+	}
+out_free_responses:
+	nvmet_rdma_free_rsps(queue);
+out_ida_remove:
+	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+out_destroy_sq:
+	nvmet_sq_destroy(&queue->nvme_sq);
+out_free_queue:
+	kfree(queue);
+out_reject:
+	nvmet_rdma_cm_reject(cm_id, ret);
+	return NULL;
+}
+
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
+{
+	struct nvmet_rdma_queue *queue = priv;
+
+	switch (event->event) {
+	case IB_EVENT_COMM_EST:
+		rdma_notify(queue->cm_id, event->event);
+		break;
+	default:
+		pr_err("received IB QP event: %s (%d)\n",
+		       ib_event_msg(event->event), event->event);
+		break;
+	}
+}
+
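+/*
+ * Accept the connection, advertising our recv queue depth as crqsize and
+ * clamping the initiator depth to the device's max_qp_init_rd_atom.
+ */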
+static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
+		struct nvmet_rdma_queue *queue,
+		struct rdma_conn_param *p)
+{
+	struct rdma_conn_param  param = { };
+	struct nvme_rdma_cm_rep priv = { };
+	int ret = -ENOMEM;
+
+	param.rnr_retry_count = 7;
+	param.flow_control = 1;
+	param.initiator_depth = min_t(u8, p->initiator_depth,
+		queue->dev->device->attrs.max_qp_init_rd_atom);
+	param.private_data = &priv;
+	param.private_data_len = sizeof(priv);
+	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
+
+	ret = rdma_accept(cm_id, &param);
+	if (ret)
+		pr_err("rdma_accept failed (error code = %d)\n", ret);
+
+	return ret;
+}
+
+static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+		struct rdma_cm_event *event)
+{
+	struct nvmet_rdma_device *ndev;
+	struct nvmet_rdma_queue *queue;
+	int ret = -EINVAL;
+
+	ndev = nvmet_rdma_find_get_device(cm_id);
+	if (!ndev) {
+		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
+		return -ECONNREFUSED;
+	}
+
+	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
+	if (!queue) {
+		ret = -ENOMEM;
+		goto put_device;
+	}
+	queue->port = cm_id->context;
+
+	if (queue->host_qid == 0) {
+		/* Let inflight controller teardown complete */
+		flush_scheduled_work();
+	}
+
+	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+	if (ret) {
+		schedule_work(&queue->release_work);
+		/* Destroying the rdma_cm id is not needed here */
+		return 0;
+	}
+
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	return 0;
+
+put_device:
+	kref_put(&ndev->ref, nvmet_rdma_free_dev);
+
+	return ret;
+}
+
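+/*
+ * ESTABLISHED handler: move the queue from CONNECTING to LIVE and process
+ * any commands that were queued up while the connection was still being
+ * established.
+ */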
+static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->state_lock, flags);
+	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
+		pr_warn("trying to establish a connected queue\n");
+		goto out_unlock;
+	}
+	queue->state = NVMET_RDMA_Q_LIVE;
+
+	while (!list_empty(&queue->rsp_wait_list)) {
+		struct nvmet_rdma_rsp *cmd;
+
+		cmd = list_first_entry(&queue->rsp_wait_list,
+					struct nvmet_rdma_rsp, wait_list);
+		list_del(&cmd->wait_list);
+
+		spin_unlock_irqrestore(&queue->state_lock, flags);
+		nvmet_rdma_handle_command(queue, cmd);
+		spin_lock_irqsave(&queue->state_lock, flags);
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&queue->state_lock, flags);
+}
+
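+/*
+ * Transition the queue to DISCONNECTING exactly once; only the first caller
+ * issues rdma_disconnect() and schedules the release work.  All callers
+ * remove the queue from nvmet_rdma_queue_list before calling this helper.
+ */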
+static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+	bool disconnect = false;
+	unsigned long flags;
+
+	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
+
+	spin_lock_irqsave(&queue->state_lock, flags);
+	switch (queue->state) {
+	case NVMET_RDMA_Q_CONNECTING:
+	case NVMET_RDMA_Q_LIVE:
+		queue->state = NVMET_RDMA_Q_DISCONNECTING;
+		disconnect = true;
+		break;
+	case NVMET_RDMA_Q_DISCONNECTING:
+		break;
+	}
+	spin_unlock_irqrestore(&queue->state_lock, flags);
+
+	if (disconnect) {
+		rdma_disconnect(queue->cm_id);
+		schedule_work(&queue->release_work);
+	}
+}
+
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+	bool disconnect = false;
+
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	if (!list_empty(&queue->queue_list)) {
+		list_del_init(&queue->queue_list);
+		disconnect = true;
+	}
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	if (disconnect)
+		__nvmet_rdma_queue_disconnect(queue);
+}
+
+static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
+		struct nvmet_rdma_queue *queue)
+{
+	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
+
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	if (!list_empty(&queue->queue_list))
+		list_del_init(&queue->queue_list);
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	pr_err("failed to connect queue %d\n", queue->idx);
+	schedule_work(&queue->release_work);
+}
+
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id:	rdma_cm id, used for nvmet port
+ * @queue:	nvmet rdma queue (cm id qp_context)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug. Note that this event can be generated on a normal
+ * queue cm_id and/or a device bound listener cm_id (in which case
+ * queue will be NULL).
+ *
+ * We registered an ib_client to handle device removal for queues,
+ * so we only need to handle the listening port cm_ids. In this case
+ * we nullify the port's priv to prevent a double cm_id destruction and
+ * destroy the cm_id implicitly by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+		struct nvmet_rdma_queue *queue)
+{
+	struct nvmet_port *port;
+
+	if (queue) {
+		/*
+		 * This is a queue cm_id. We have registered
+		 * an ib_client to handle queue removal,
+		 * so don't interfere and just return.
+		 */
+		return 0;
+	}
+
+	port = cm_id->context;
+
+	/*
+	 * This is a listener cm_id. Make sure that a
+	 * future remove_port won't invoke a double
+	 * cm_id destroy. Use an atomic xchg to make sure
+	 * we don't race with remove_port.
+	 */
+	if (xchg(&port->priv, NULL) != cm_id)
+		return 0;
+
+	/*
+	 * We need to return 1 so that the core will destroy
+	 * its own ID.  What a great API design..
+	 */
+	return 1;
+}
+
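+/*
+ * Central RDMA CM event handler for both the listening port cm_ids and the
+ * per-queue cm_ids; the queue is resolved from the qp_context when a QP
+ * exists.
+ */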
+static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
+		struct rdma_cm_event *event)
+{
+	struct nvmet_rdma_queue *queue = NULL;
+	int ret = 0;
+
+	if (cm_id->qp)
+		queue = cm_id->qp->qp_context;
+
+	pr_debug("%s (%d): status %d id %p\n",
+		rdma_event_msg(event->event), event->event,
+		event->status, cm_id);
+
+	switch (event->event) {
+	case RDMA_CM_EVENT_CONNECT_REQUEST:
+		ret = nvmet_rdma_queue_connect(cm_id, event);
+		break;
+	case RDMA_CM_EVENT_ESTABLISHED:
+		nvmet_rdma_queue_established(queue);
+		break;
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+	case RDMA_CM_EVENT_DISCONNECTED:
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+		nvmet_rdma_queue_disconnect(queue);
+		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		ret = nvmet_rdma_device_removal(cm_id, queue);
+		break;
+	case RDMA_CM_EVENT_REJECTED:
+		pr_debug("Connection rejected: %s\n",
+			 rdma_reject_msg(cm_id, event->status));
+		/* FALLTHROUGH */
+	case RDMA_CM_EVENT_UNREACHABLE:
+	case RDMA_CM_EVENT_CONNECT_ERROR:
+		nvmet_rdma_queue_connect_fail(cm_id, queue);
+		break;
+	default:
+		pr_err("received unrecognized RDMA CM event %d\n",
+			event->event);
+		break;
+	}
+
+	return ret;
+}
+
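+/*
+ * Disconnect every queue that belongs to the given controller.  The list
+ * mutex is dropped around each disconnect, so the scan restarts from the
+ * head after every removal.
+ */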
+static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_rdma_queue *queue;
+
+restart:
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+		if (queue->nvme_sq.ctrl == ctrl) {
+			list_del_init(&queue->queue_list);
+			mutex_unlock(&nvmet_rdma_queue_mutex);
+
+			__nvmet_rdma_queue_disconnect(queue);
+			goto restart;
+		}
+	}
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
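+/*
+ * Resolve the port's discovery address, apply the inline data size limits,
+ * and create a listening cm_id bound to that address.  The cm_id is stashed
+ * in port->priv so remove_port and device removal can tear it down.
+ */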
+static int nvmet_rdma_add_port(struct nvmet_port *port)
+{
+	struct rdma_cm_id *cm_id;
+	struct sockaddr_storage addr = { };
+	__kernel_sa_family_t af;
+	int ret;
+
+	switch (port->disc_addr.adrfam) {
+	case NVMF_ADDR_FAMILY_IP4:
+		af = AF_INET;
+		break;
+	case NVMF_ADDR_FAMILY_IP6:
+		af = AF_INET6;
+		break;
+	default:
+		pr_err("address family %d not supported\n",
+				port->disc_addr.adrfam);
+		return -EINVAL;
+	}
+
+	if (port->inline_data_size < 0) {
+		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+		pr_warn("inline_data_size %u is too large, reducing to %u\n",
+			port->inline_data_size,
+			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+	}
+
+	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
+			port->disc_addr.trsvcid, &addr);
+	if (ret) {
+		pr_err("malformed ip/port passed: %s:%s\n",
+			port->disc_addr.traddr, port->disc_addr.trsvcid);
+		return ret;
+	}
+
+	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
+			RDMA_PS_TCP, IB_QPT_RC);
+	if (IS_ERR(cm_id)) {
+		pr_err("CM ID creation failed\n");
+		return PTR_ERR(cm_id);
+	}
+
+	/*
+	 * Allow both IPv4 and IPv6 sockets to bind a single port
+	 * at the same time.
+	 */
+	ret = rdma_set_afonly(cm_id, 1);
+	if (ret) {
+		pr_err("rdma_set_afonly failed (%d)\n", ret);
+		goto out_destroy_id;
+	}
+
+	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+	if (ret) {
+		pr_err("binding CM ID to %pISpcs failed (%d)\n",
+			(struct sockaddr *)&addr, ret);
+		goto out_destroy_id;
+	}
+
+	ret = rdma_listen(cm_id, 128);
+	if (ret) {
+		pr_err("listening to %pISpcs failed (%d)\n",
+			(struct sockaddr *)&addr, ret);
+		goto out_destroy_id;
+	}
+
+	pr_info("enabling port %d (%pISpcs)\n",
+		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
+	port->priv = cm_id;
+	return 0;
+
+out_destroy_id:
+	rdma_destroy_id(cm_id);
+	return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *port)
+{
+	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+
+	if (cm_id)
+		rdma_destroy_id(cm_id);
+}
+
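+/*
+ * Report the discovery transport address.  For a wildcard listener, use the
+ * source address of the connection that carried this request instead of the
+ * configured traddr.
+ */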
+static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
+		struct nvmet_port *port, char *traddr)
+{
+	struct rdma_cm_id *cm_id = port->priv;
+
+	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
+		struct nvmet_rdma_rsp *rsp =
+			container_of(req, struct nvmet_rdma_rsp, req);
+		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
+		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
+
+		sprintf(traddr, "%pISc", addr);
+	} else {
+		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+	}
+}
+
+static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
+	.owner			= THIS_MODULE,
+	.type			= NVMF_TRTYPE_RDMA,
+	.msdbd			= 1,
+	.has_keyed_sgls		= 1,
+	.add_port		= nvmet_rdma_add_port,
+	.remove_port		= nvmet_rdma_remove_port,
+	.queue_response		= nvmet_rdma_queue_response,
+	.delete_ctrl		= nvmet_rdma_delete_ctrl,
+	.disc_traddr		= nvmet_rdma_disc_port_addr,
+};
+
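+/*
+ * ib_client remove callback: if the removed ib_device backs any nvmet_rdma
+ * device, disconnect all queues that use it and wait for the queued release
+ * work to finish.
+ */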
+static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+	struct nvmet_rdma_queue *queue, *tmp;
+	struct nvmet_rdma_device *ndev;
+	bool found = false;
+
+	mutex_lock(&device_list_mutex);
+	list_for_each_entry(ndev, &device_list, entry) {
+		if (ndev->device == ib_device) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&device_list_mutex);
+
+	if (!found)
+		return;
+
+	/*
+	 * An IB device used by nvmet controllers is being removed;
+	 * delete all queues using this device.
+	 */
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+				 queue_list) {
+		if (queue->dev->device != ib_device)
+			continue;
+
+		pr_info("Removing queue %d\n", queue->idx);
+		list_del_init(&queue->queue_list);
+		__nvmet_rdma_queue_disconnect(queue);
+	}
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	flush_scheduled_work();
+}
+
+static struct ib_client nvmet_rdma_ib_client = {
+	.name   = "nvmet_rdma",
+	.remove = nvmet_rdma_remove_one
+};
+
+static int __init nvmet_rdma_init(void)
+{
+	int ret;
+
+	ret = ib_register_client(&nvmet_rdma_ib_client);
+	if (ret)
+		return ret;
+
+	ret = nvmet_register_transport(&nvmet_rdma_ops);
+	if (ret)
+		goto err_ib_client;
+
+	return 0;
+
+err_ib_client:
+	ib_unregister_client(&nvmet_rdma_ib_client);
+	return ret;
+}
+
+static void __exit nvmet_rdma_exit(void)
+{
+	nvmet_unregister_transport(&nvmet_rdma_ops);
+	ib_unregister_client(&nvmet_rdma_ib_client);
+	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
+	ida_destroy(&nvmet_rdma_queue_ida);
+}
+
+module_init(nvmet_rdma_init);
+module_exit(nvmet_rdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */