Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index cf0ae71..494675a 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,12 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
depends on PCI && BLOCK
select NVME_CORE
- ---help---
+ help
The NVM Express driver is for solid state drives directly
connected to the PCI or PCI Express bus. If you know you
don't have one of these, it is safe to answer N.
@@ -17,12 +18,20 @@
config NVME_MULTIPATH
bool "NVMe multipath support"
depends on NVME_CORE
- ---help---
+ help
This option enables support for multipath access to NVMe
subsystems. If this option is enabled only a single
/dev/nvmeXnY device will show up for each NVMe namespaces,
even if it is accessible through multiple controllers.
+config NVME_HWMON
+ bool "NVMe hardware monitoring"
+ depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
+ help
+ This provides support for NVMe hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each NVMe drive
+ in the system.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 8a4b671..d7f6a87 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -13,7 +13,9 @@
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
+nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
+nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a5b5a23..71c85c9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -13,13 +13,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
-#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
-#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
@@ -90,26 +88,38 @@
static struct class *nvme_class;
static struct class *nvme_subsys_class;
-static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_bdev_size(struct gendisk *disk)
+{
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (bdev) {
+ bd_set_nr_sectors(bdev, get_capacity(disk));
+ bdput(bdev);
+ }
+}
+
+/*
+ * Prepare a queue for teardown.
+ *
+ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+ * the capacity to 0 after that to avoid blocking dispatchers that may be
+ * holding bd_mutex. This will end buffered writers dirtying pages that can't
+ * be synced.
+ */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
- /*
- * Revalidating a dead namespace sets capacity to 0. This will end
- * buffered writers dirtying pages that can't be synced.
- */
- if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
+
blk_set_queue_dying(ns->queue);
- /* Forcibly unquiesce queues to avoid blocking dispatch */
blk_mq_unquiesce_queue(ns->queue);
- /*
- * Revalidate after unblocking dispatchers that may be holding bd_butex
- */
- revalidate_disk(ns->disk);
+
+ set_capacity(ns->disk, 0);
+ nvme_update_bdev_size(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -172,7 +182,6 @@
nvme_remove_namespaces(ctrl);
ctrl->ops->delete_ctrl(ctrl);
nvme_uninit_ctrl(ctrl);
- nvme_put_ctrl(ctrl);
}
static void nvme_delete_ctrl_work(struct work_struct *work)
@@ -193,26 +202,16 @@
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
-static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
- int ret = 0;
-
/*
* Keep a reference until nvme_do_delete_ctrl() complete,
* since ->delete_ctrl can free the controller.
*/
nvme_get_ctrl(ctrl);
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
- ret = -EBUSY;
- if (!ret)
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
nvme_do_delete_ctrl(ctrl);
nvme_put_ctrl(ctrl);
- return ret;
-}
-
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
-{
- return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
static blk_status_t nvme_error_status(u16 status)
@@ -223,6 +222,8 @@
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
@@ -246,22 +247,15 @@
return BLK_STS_NEXUS;
case NVME_SC_HOST_PATH_ERROR:
return BLK_STS_TRANSPORT;
+ case NVME_SC_ZONE_TOO_MANY_ACTIVE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case NVME_SC_ZONE_TOO_MANY_OPEN:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
default:
return BLK_STS_IOERR;
}
}
-static inline bool nvme_req_needs_retry(struct request *req)
-{
- if (blk_noretry_request(req))
- return false;
- if (nvme_req(req)->status & NVME_SC_DNR)
- return false;
- if (nvme_req(req)->retries >= nvme_max_retries)
- return false;
- return true;
-}
-
static void nvme_retry_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -278,27 +272,66 @@
blk_mq_delay_kick_requeue_list(req->q, delay);
}
-void nvme_complete_rq(struct request *req)
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+};
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
+{
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
+
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
+
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
+ }
+
+ return RETRY;
+}
+
+static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ le64_to_cpu(nvme_req(req)->result.u64));
+
+ nvme_trace_bio_complete(req, status);
+ blk_mq_end_request(req, status);
+}
+
+void nvme_complete_rq(struct request *req)
+{
trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
if (nvme_req(req)->ctrl->kas)
nvme_req(req)->ctrl->comp_seen = true;
- if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
- return;
-
- if (!blk_queue_dying(req->q)) {
- nvme_retry_req(req);
- return;
- }
+ switch (nvme_decide_disposition(req)) {
+ case COMPLETE:
+ nvme_end_req(req);
+ return;
+ case RETRY:
+ nvme_retry_req(req);
+ return;
+ case FAILOVER:
+ nvme_failover_req(req);
+ return;
}
-
- nvme_trace_bio_complete(req, status);
- blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -355,7 +388,7 @@
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -365,7 +398,7 @@
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -375,7 +408,7 @@
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -386,7 +419,17 @@
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING_NOIO:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ changed = true;
+ fallthrough;
default:
break;
}
@@ -395,7 +438,7 @@
switch (old_state) {
case NVME_CTRL_DELETING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -428,6 +471,7 @@
case NVME_CTRL_CONNECTING:
return false;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
case NVME_CTRL_DEAD:
return true;
default:
@@ -479,10 +523,11 @@
kfree(ns);
}
-static void nvme_put_ns(struct nvme_ns *ns)
+void nvme_put_ns(struct nvme_ns *ns)
{
kref_put(&ns->kref, nvme_free_ns);
}
+EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
@@ -552,7 +597,7 @@
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
+ c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -575,19 +620,22 @@
ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
if (ret)
- return ret;
+ goto out_disable_stream;
ctrl->nssa = le16_to_cpu(s.nssa);
if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
dev_info(ctrl->device, "too few streams (%u) available\n",
ctrl->nssa);
- nvme_disable_streams(ctrl);
- return 0;
+ goto out_disable_stream;
}
- ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
+ ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0;
+
+out_disable_stream:
+ nvme_disable_streams(ctrl);
+ return ret;
}
/*
@@ -615,6 +663,14 @@
req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
+static void nvme_setup_passthrough(struct request *req,
+ struct nvme_command *cmd)
+{
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+ /* passthru commands should let the driver set the SGL flags */
+ cmd->common.flags &= ~NVME_CMD_SGL_ALL;
+}
+
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
@@ -702,7 +758,8 @@
}
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_opcode op)
{
struct nvme_ctrl *ctrl = ns->ctrl;
u16 control = 0;
@@ -716,7 +773,7 @@
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.opcode = op;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -745,6 +802,8 @@
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
+ if (op == nvme_cmd_zone_append)
+ control |= NVME_RW_APPEND_PIREMAP;
cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
}
@@ -772,6 +831,7 @@
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
nvme_clear_nvme_request(req);
@@ -780,11 +840,24 @@
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+ nvme_setup_passthrough(req, cmd);
break;
case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
break;
+ case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_RESET:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+ break;
case REQ_OP_WRITE_ZEROES:
ret = nvme_setup_write_zeroes(ns, req, cmd);
break;
@@ -792,15 +865,22 @@
ret = nvme_setup_discard(ns, req, cmd);
break;
case REQ_OP_READ:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+ break;
case REQ_OP_WRITE:
- ret = nvme_setup_rw(ns, req, cmd);
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
+ break;
+ case REQ_OP_ZONE_APPEND:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
break;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
- cmd->common.command_id = req->tag;
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ nvme_req(req)->genctr++;
+ cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
}
@@ -913,6 +993,93 @@
return ERR_PTR(ret);
}
+static u32 nvme_known_admin_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_admin_format_nvm:
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
+ NVME_CMD_EFFECTS_CSE_MASK;
+ case nvme_admin_sanitize_nvm:
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
+ default:
+ break;
+ }
+ return 0;
+}
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+ u32 effects = 0;
+
+ if (ns) {
+ if (ns->head->effects)
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn(ctrl->device,
+ "IO command:%02x has unhandled effects:%08x\n",
+ opcode, effects);
+ return 0;
+ }
+
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ effects |= nvme_known_admin_effects(opcode);
+
+ return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
+
+static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode)
+{
+ u32 effects = nvme_command_effects(ctrl, ns, opcode);
+
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
+
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+{
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
+ mutex_unlock(&ctrl->scan_lock);
+ }
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_identify(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
+ nvme_queue_scan(ctrl);
+ flush_work(&ctrl->scan_work);
+ }
+}
+
+void nvme_execute_passthru_rq(struct request *rq)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct gendisk *disk = ns ? ns->disk : NULL;
+ u32 effects;
+
+ effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ blk_execute_rq(rq->q, disk, rq, 0);
+ nvme_passthru_end(ctrl, effects);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
+
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
@@ -951,7 +1118,7 @@
}
}
- blk_execute_rq(req->q, disk, req, 0);
+ nvme_execute_passthru_rq(req);
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
else
@@ -1085,15 +1252,67 @@
return error;
}
+static bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
+static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ struct nvme_ns_id_desc *cur, bool *csi_seen)
+{
+ const char *warn_str = "ctrl returned bogus length:";
+ void *data = cur;
+
+ switch (cur->nidt) {
+ case NVME_NIDT_EUI64:
+ if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
+ return NVME_NIDT_EUI64_LEN;
+ case NVME_NIDT_NGUID:
+ if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
+ return NVME_NIDT_NGUID_LEN;
+ case NVME_NIDT_UUID:
+ if (cur->nidl != NVME_NIDT_UUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ uuid_copy(&ids->uuid, data + sizeof(*cur));
+ return NVME_NIDT_UUID_LEN;
+ case NVME_NIDT_CSI:
+ if (cur->nidl != NVME_NIDT_CSI_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
+ *csi_seen = true;
+ return NVME_NIDT_CSI_LEN;
+ default:
+ /* Skip unknown types */
+ return cur->nidl;
+ }
+}
+
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
struct nvme_command c = { };
- int status;
+ bool csi_seen = false;
+ int status, pos, len;
void *data;
- int pos;
- int len;
+ if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
+ return 0;
if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
return 0;
@@ -1119,63 +1338,26 @@
if (cur->nidl == 0)
break;
- switch (cur->nidt) {
- case NVME_NIDT_EUI64:
- if (cur->nidl != NVME_NIDT_EUI64_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_EUI64_LEN;
- memcpy(ids->eui64, data + pos + sizeof(*cur), len);
+ len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ if (len < 0)
break;
- case NVME_NIDT_NGUID:
- if (cur->nidl != NVME_NIDT_NGUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_NGUID_LEN;
- memcpy(ids->nguid, data + pos + sizeof(*cur), len);
- break;
- case NVME_NIDT_UUID:
- if (cur->nidl != NVME_NIDT_UUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_UUID_LEN;
- uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
- break;
- default:
- /* Skip unknown types */
- len = cur->nidl;
- break;
- }
len += sizeof(*cur);
}
+
+ if (nvme_multi_css(ctrl) && !csi_seen) {
+ dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
+ nsid);
+ status = -EINVAL;
+ }
+
free_data:
kfree(data);
return status;
}
-static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
-{
- struct nvme_command c = { };
-
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
- c.identify.nsid = cpu_to_le32(nsid);
- return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
- NVME_IDENTIFY_DATA_SIZE);
-}
-
-static int nvme_identify_ns(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns **id)
+static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1192,9 +1374,24 @@
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
- kfree(*id);
+ goto out_free_id;
}
+ error = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ if ((*id)->ncap == 0) /* namespace not allocated or attached */
+ goto out_free_id;
+
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ return 0;
+
+out_free_id:
+ kfree(*id);
return error;
}
@@ -1334,7 +1531,7 @@
metadata = nvme_to_user_ptr(io.metadata);
}
- if (ns->ext) {
+ if (ns->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
@@ -1359,94 +1556,12 @@
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
-static u32 nvme_known_admin_effects(u8 opcode)
-{
- switch (opcode) {
- case nvme_admin_format_nvm:
- return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
- NVME_CMD_EFFECTS_CSE_MASK;
- case nvme_admin_sanitize_nvm:
- return NVME_CMD_EFFECTS_CSE_MASK;
- default:
- break;
- }
- return 0;
-}
-
-static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 opcode)
-{
- u32 effects = 0;
-
- if (ns) {
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
- if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
- dev_warn(ctrl->device,
- "IO command:%02x has unhandled effects:%08x\n",
- opcode, effects);
- return 0;
- }
-
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
-
- /*
- * For simplicity, IO to all namespaces is quiesced even if the command
- * effects say only one namespace is affected.
- */
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- mutex_lock(&ctrl->scan_lock);
- mutex_lock(&ctrl->subsys->lock);
- nvme_mpath_start_freeze(ctrl->subsys);
- nvme_mpath_wait_freeze(ctrl->subsys);
- nvme_start_freeze(ctrl);
- nvme_wait_freeze(ctrl);
- }
- return effects;
-}
-
-static void nvme_update_formats(struct nvme_ctrl *ctrl)
-{
- struct nvme_ns *ns;
-
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (ns->disk && nvme_revalidate_disk(ns->disk))
- nvme_set_queue_dying(ns);
- up_read(&ctrl->namespaces_rwsem);
-}
-
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
-{
- /*
- * Revalidate LBA changes prior to unfreezing. This is necessary to
- * prevent memory corruption if a logical block size was changed by
- * this command.
- */
- if (effects & NVME_CMD_EFFECTS_LBCC)
- nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- nvme_unfreeze(ctrl);
- nvme_mpath_unfreeze(ctrl->subsys);
- mutex_unlock(&ctrl->subsys->lock);
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
- mutex_unlock(&ctrl->scan_lock);
- }
- if (effects & NVME_CMD_EFFECTS_CCC)
- nvme_init_identify(ctrl);
- if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
- nvme_queue_scan(ctrl);
-}
-
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
- u32 effects;
u64 result;
int status;
@@ -1473,12 +1588,10 @@
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout);
- nvme_passthru_end(ctrl, effects);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -1494,7 +1607,6 @@
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
- u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1520,12 +1632,10 @@
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
- nvme_passthru_end(ctrl, effects);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -1539,7 +1649,7 @@
* Issue ioctl requests on the first available path. Note that unlike normal
* block layer requests we will not retry failed request on another controller.
*/
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
@@ -1559,7 +1669,7 @@
return disk->private_data;
}
-static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
if (head)
srcu_read_unlock(&head->srcu, idx);
@@ -1645,6 +1755,47 @@
return ret;
}
+#ifdef CONFIG_COMPAT
+struct nvme_user_io32 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 control;
+ __u16 nblocks;
+ __u16 rsvd;
+ __u64 metadata;
+ __u64 addr;
+ __u64 slba;
+ __u32 dsmgmt;
+ __u32 reftag;
+ __u16 apptag;
+ __u16 appmask;
+} __attribute__((__packed__));
+
+#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
+
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ /*
+ * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
+ * between 32 bit programs and 64 bit kernel.
+ * The cause is that the results of sizeof(struct nvme_user_io),
+ * which is used to define NVME_IOCTL_SUBMIT_IO,
+ * are not the same between 32 bit and 64 bit compilers.
+ * NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling
+ * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
+ * Other IOCTL numbers are the same between 32 bit and 64 bit.
+ * So there is nothing to do regarding the other IOCTL numbers.
+ */
+ if (cmd == NVME_IOCTL_SUBMIT_IO32)
+ return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
+
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
@@ -1685,7 +1836,8 @@
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
+ u32 max_integrity_segments)
{
struct blk_integrity integrity;
@@ -1708,10 +1860,11 @@
}
integrity.tuple_size = ms;
blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, 1);
+ blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
+ u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1761,20 +1914,6 @@
blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
}
-static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
- struct nvme_id_ns *id, struct nvme_ns_ids *ids)
-{
- memset(ids, 0, sizeof(*ids));
-
- if (ctrl->vs >= NVME_VS(1, 1, 0))
- memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0))
- memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
- if (ctrl->vs >= NVME_VS(1, 3, 0))
- return nvme_identify_ns_descs(ctrl, nsid, ids);
- return 0;
-}
-
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
return !uuid_is_null(&ids->uuid) ||
@@ -1786,7 +1925,95 @@
{
return uuid_equal(&a->uuid, &b->uuid) &&
memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
- memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
+ a->csi == b->csi;
+}
+
+static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u32 *phys_bs, u32 *io_opt)
+{
+ struct streams_directive_params s;
+ int ret;
+
+ if (!ctrl->nr_streams)
+ return 0;
+
+ ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
+ if (ret)
+ return ret;
+
+ ns->sws = le32_to_cpu(s.sws);
+ ns->sgs = le16_to_cpu(s.sgs);
+
+ if (ns->sws) {
+ *phys_bs = ns->sws * (1 << ns->lba_shift);
+ if (ns->sgs)
+ *io_opt = *phys_bs * ns->sgs;
+ }
+
+ return 0;
+}
+
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+
+ /*
+ * The PI implementation requires the metadata size to be equal to the
+ * t10 pi tuple size.
+ */
+ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+ if (ns->ms == sizeof(struct t10_pi_tuple))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
+
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ return 0;
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * The NVMe over Fabrics specification only supports metadata as
+ * part of the extended data LBA. We rely on HCA/HBA support to
+ * remap the separate metadata buffer from the block layer.
+ */
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+ return -EINVAL;
+ if (ctrl->max_integrity_segments)
+ ns->features |=
+ (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ } else {
+ /*
+ * For PCIe controllers, we can't easily remap the separate
+ * metadata buffer from the block layer and thus require a
+ * separate metadata buffer for block layer metadata/PI support.
+ * We allow extended LBAs for the passthrough interface, though.
+ */
+ if (id->flbas & NVME_NS_FLBAS_META_EXT)
+ ns->features |= NVME_NS_EXT_LBAS;
+ else
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
+
+ return 0;
+}
+
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
+{
+ bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, 7);
+ blk_queue_write_cache(q, vwc, vwc);
}
static void nvme_update_disk_info(struct gendisk *disk,
@@ -1794,35 +2021,38 @@
{
sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
- u32 atomic_bs, phys_bs, io_opt;
+ u32 atomic_bs, phys_bs, io_opt = 0;
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+ * yet, so catch this early and don't allow block I/O.
+ */
if (ns->lba_shift > PAGE_SHIFT) {
- /* unsupported block size, set capacity to 0 later */
+ capacity = 0;
bs = (1 << 9);
}
- blk_mq_freeze_queue(disk->queue);
+
blk_integrity_unregister(disk);
+ atomic_bs = phys_bs = bs;
+ nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
if (id->nabo == 0) {
/*
* Bit 1 indicates whether NAWUPF is defined for this namespace
* and whether it should be used instead of AWUPF. If NAWUPF ==
* 0 then AWUPF must be used instead.
*/
- if (id->nsfeat & (1 << 1) && id->nawupf)
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
- } else {
- atomic_bs = bs;
}
- phys_bs = bs;
- io_opt = bs;
- if (id->nsfeat & (1 << 4)) {
+
+ if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
- phys_bs *= 1 + le16_to_cpu(id->npwg);
+ phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
- io_opt *= 1 + le16_to_cpu(id->nows);
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
}
blk_queue_logical_block_size(disk->queue, bs);
@@ -1835,109 +2065,110 @@
blk_queue_io_min(disk->queue, phys_bs);
blk_queue_io_opt(disk->queue, io_opt);
- if (ns->ms && !ns->ext &&
- (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- nvme_init_integrity(disk, ns->ms, ns->pi_type);
- if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
- ns->lba_shift > PAGE_SHIFT)
- capacity = 0;
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (ns->ms) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ (ns->features & NVME_NS_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns->ms, ns->pi_type,
+ ns->ctrl->max_integrity_segments);
+ else if (!nvme_ns_has_pi(ns))
+ capacity = 0;
+ }
- set_capacity(disk, capacity);
+ set_capacity_revalidate_and_notify(disk, capacity, false);
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk->queue, ns->ctrl);
- if (id->nsattr & (1 << 0))
+ if (id->nsattr & NVME_NS_ATTR_RO)
set_disk_ro(disk, true);
- else
- set_disk_ro(disk, false);
-
- blk_mq_unfreeze_queue(disk->queue);
}
-static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+static inline bool nvme_first_scan(struct gendisk *disk)
{
- struct nvme_ns *ns = disk->private_data;
+ /* nvme_alloc_ns() scans the disk prior to adding it */
+ return !(disk->flags & GENHD_FL_UP);
+}
+
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
u32 iob;
- /*
- * If identify namespace failed, use default 512 byte block size so
- * block layer can use before failing read/write for 0 capacity.
- */
- ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
- if (ns->lba_shift == 0)
- ns->lba_shift = 9;
-
- if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
- is_power_of_2(ns->ctrl->max_hw_sectors))
- iob = ns->ctrl->max_hw_sectors;
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
else
iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
- ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
- /* the PI implementation requires metadata equal t10 pi tuple size */
- if (ns->ms == sizeof(struct t10_pi_tuple))
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- else
- ns->pi_type = 0;
+ if (!iob)
+ return;
- if (iob)
- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
- nvme_update_disk_info(disk, ns, id);
-#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk) {
- nvme_update_disk_info(ns->head->disk, ns, id);
- blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- nvme_mpath_update_disk_size(ns->head->disk);
+ if (!is_power_of_2(iob)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring unaligned IO boundary:%u\n",
+ ns->disk->disk_name, iob);
+ return;
}
-#endif
+
+ if (blk_queue_is_zoned(ns->disk->queue)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring zoned namespace IO boundary\n",
+ ns->disk->disk_name);
+ return;
+ }
+
+ blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_id_ns *id;
- struct nvme_ns_ids ids;
- int ret = 0;
+ unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
+ int ret;
- if (test_bit(NVME_NS_DEAD, &ns->flags)) {
- set_capacity(disk, 0);
- return -ENODEV;
+ blk_mq_freeze_queue(ns->disk->queue);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+
+ if (ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf);
+ if (ret)
+ goto out_unfreeze;
}
- ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
+ ret = nvme_configure_metadata(ns, id);
if (ret)
- goto out;
+ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+ blk_mq_unfreeze_queue(ns->disk->queue);
- if (id->ncap == 0) {
- ret = -ENODEV;
- goto free_id;
+ if (blk_queue_is_zoned(ns->queue)) {
+ ret = nvme_revalidate_zones(ns);
+ if (ret && !nvme_first_scan(ns->disk))
+ return ret;
}
- __nvme_revalidate_disk(disk, id);
- ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
- if (ret)
- goto free_id;
-
- if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
- dev_err(ctrl->device,
- "identifiers changed for nsid %d\n", ns->head->ns_id);
- ret = -ENODEV;
+#ifdef CONFIG_NVME_MULTIPATH
+ if (ns->head->disk) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ blk_queue_update_readahead(ns->head->disk->queue);
+ nvme_update_bdev_size(ns->head->disk);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
}
+#endif
+ return 0;
-free_id:
- kfree(id);
-out:
- /*
- * Only fail the function if we got a fatal error back from the
- * device, otherwise ignore the error and just move on.
- */
- if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
- ret = 0;
- else if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
+out_unfreeze:
+ blk_mq_unfreeze_queue(ns->disk->queue);
return ret;
}
@@ -2066,11 +2297,11 @@
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
- .revalidate_disk= nvme_revalidate_disk,
+ .report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -2091,11 +2322,13 @@
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
+ .submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
.getgeo = nvme_getgeo,
+ .report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
@@ -2113,13 +2346,13 @@
if ((csts & NVME_CSTS_RDY) == bit)
break;
- msleep(100);
+ usleep_range(1000, 2000);
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
dev_err(ctrl->device,
- "Device not ready; aborting %s\n", enabled ?
- "initialisation" : "reset");
+ "Device not ready; aborting %s, CSTS=0x%x\n",
+ enabled ? "initialisation" : "reset", csts);
return -ENODEV;
}
}
@@ -2153,12 +2386,7 @@
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
- /*
- * Default to a 4K page size, with the intention to update this
- * path in the future to accomodate architectures with differing
- * kernel and IO page sizes.
- */
- unsigned dev_page_min, page_shift = 12;
+ unsigned dev_page_min;
int ret;
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
@@ -2168,17 +2396,18 @@
}
dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- if (page_shift < dev_page_min) {
+ if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
- 1 << dev_page_min, 1 << page_shift);
+ 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
return -ENODEV;
}
- ctrl->page_size = 1 << page_shift;
-
- ctrl->ctrl_config = NVME_CC_CSS_NVM;
- ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+ if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
+ ctrl->ctrl_config = NVME_CC_CSS_CSI;
+ else
+ ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
ctrl->ctrl_config |= NVME_CC_ENABLE;
@@ -2221,25 +2450,6 @@
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
-{
- bool vwc = false;
-
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
-
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
- }
- blk_queue_virt_boundary(q, ctrl->page_size - 1);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(q, vwc, vwc);
-}
-
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
__le64 ts;
@@ -2620,6 +2830,11 @@
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2628,8 +2843,7 @@
lockdep_assert_held(&nvme_subsystems_lock);
list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
- if (tmp->state == NVME_CTRL_DELETING ||
- tmp->state == NVME_CTRL_DEAD)
+ if (nvme_state_terminal(tmp))
continue;
if (tmp->cntlid == ctrl->cntlid) {
@@ -2639,8 +2853,8 @@
return false;
}
- if ((id->cmic & (1 << 1)) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -2726,11 +2940,11 @@
return ret;
}
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
- unsigned long dwlen = size / 4 - 1;
+ u32 dwlen = nvme_bytes_to_numd(size);
c.get_log_page.opcode = nvme_admin_get_log_page;
c.get_log_page.nsid = cpu_to_le32(nsid);
@@ -2740,27 +2954,35 @@
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ c.get_log_page.csi = csi;
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
-static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ struct nvme_effects_log **log)
{
+ struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
int ret;
- if (!ctrl->effects)
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+ if (cel)
+ goto out;
- if (!ctrl->effects)
- return 0;
+ cel = kzalloc(sizeof(*cel), GFP_KERNEL);
+ if (!cel)
+ return -ENOMEM;
- ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
- ctrl->effects, sizeof(*ctrl->effects), 0);
+ ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
+ cel, sizeof(*cel), 0);
if (ret) {
- kfree(ctrl->effects);
- ctrl->effects = NULL;
+ kfree(cel);
+ return ret;
}
- return ret;
+
+ xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+out:
+ *log = cel;
+ return 0;
}
/*
@@ -2781,7 +3003,7 @@
return ret;
}
page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+ ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
@@ -2793,7 +3015,7 @@
}
if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
- ret = nvme_get_effects_log(ctrl);
+ ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
if (ret < 0)
goto out_free;
}
@@ -2835,6 +3057,9 @@
ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->mtfa = le16_to_cpu(id->mtfa);
ctrl->oaes = le32_to_cpu(id->oaes);
+ ctrl->wctemp = le16_to_cpu(id->wctemp);
+ ctrl->cctemp = le16_to_cpu(id->cctemp);
+
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
if (id->mdts)
@@ -2852,7 +3077,7 @@
if (id->rtd3e) {
/* us -> s */
- u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
+ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
shutdown_timeout, 60);
@@ -2890,11 +3115,15 @@
* admin connect
*/
if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+ dev_err(ctrl->device,
+ "Mismatching cntlid: Connect %u vs Identify "
+ "%u, rejecting\n",
+ ctrl->cntlid, le16_to_cpu(id->cntlid));
ret = -EINVAL;
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -2934,6 +3163,12 @@
if (ret < 0)
return ret;
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+ ret = nvme_hwmon_init(ctrl);
+ if (ret < 0)
+ return ret;
+ }
+
ctrl->identified = true;
return 0;
@@ -3040,7 +3275,7 @@
.open = nvme_dev_open,
.release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
- .compat_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
@@ -3237,10 +3472,6 @@
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- /* Can't delete non-created controllers */
- if (!ctrl->created)
- return -EBUSY;
-
if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -3268,6 +3499,7 @@
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
+ [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
[NVME_CTRL_DEAD] = "dead",
};
@@ -3290,6 +3522,26 @@
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+}
+static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+}
+static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
+
static ssize_t nvme_sysfs_show_address(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3300,6 +3552,66 @@
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (ctrl->opts->max_reconnects == -1)
+ return sprintf(buf, "off\n");
+ return sprintf(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ctrl_loss_tmo, err;
+
+ err = kstrtoint(buf, 10, &ctrl_loss_tmo);
+ if (err)
+ return -EINVAL;
+
+ else if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ return count;
+}
+static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
+
+static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->reconnect_delay == -1)
+ return sprintf(buf, "off\n");
+ return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int v;
+ int err;
+
+ err = kstrtou32(buf, 10, &v);
+ if (err)
+ return err;
+
+ ctrl->opts->reconnect_delay = v;
+ return count;
+}
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
+ nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3315,6 +3627,10 @@
&dev_attr_numa_node.attr,
&dev_attr_queue_count.attr,
&dev_attr_sqsize.attr,
+ &dev_attr_hostnqn.attr,
+ &dev_attr_hostid.attr,
+ &dev_attr_ctrl_loss_tmo.attr,
+ &dev_attr_reconnect_delay.attr,
NULL
};
@@ -3328,6 +3644,14 @@
return 0;
if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
return 0;
+ if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_hostid.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
return a->mode;
}
@@ -3342,7 +3666,7 @@
NULL,
};
-static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
unsigned nsid)
{
struct nvme_ns_head *h;
@@ -3374,7 +3698,7 @@
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns *id)
+ unsigned nsid, struct nvme_ns_ids *ids)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3397,12 +3721,9 @@
goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
+ head->ids = *ids;
kref_init(&head->ref);
- ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
- if (ret)
- goto out_cleanup_srcu;
-
ret = __nvme_check_ids(ctrl->subsys, head);
if (ret) {
dev_err(ctrl->device,
@@ -3410,6 +3731,13 @@
goto out_cleanup_srcu;
}
+ if (head->ids.csi) {
+ ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
+ if (ret)
+ goto out_cleanup_srcu;
+ } else
+ head->effects = ctrl->effects;
+
ret = nvme_mpath_alloc_disk(ctrl, head);
if (ret)
goto out_cleanup_srcu;
@@ -3432,58 +3760,49 @@
}
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_id_ns *id)
+ struct nvme_ns_ids *ids, bool is_shared)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- bool is_shared = id->nmic & (1 << 0);
struct nvme_ns_head *head = NULL;
int ret = 0;
mutex_lock(&ctrl->subsys->lock);
- if (is_shared)
- head = __nvme_find_ns_head(ctrl->subsys, nsid);
+ head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
- head = nvme_alloc_ns_head(ctrl, nsid, id);
+ head = nvme_alloc_ns_head(ctrl, nsid, ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
+ head->shared = is_shared;
} else {
- struct nvme_ns_ids ids;
-
- ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
- if (ret)
- goto out_unlock;
-
- if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+ ret = -EINVAL;
+ if (!is_shared || !head->shared) {
+ dev_err(ctrl->device,
+ "Duplicate unshared namespace %d\n", nsid);
+ goto out_put_ns_head;
+ }
+ if (!nvme_ns_ids_equal(&head->ids, ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
nsid);
- ret = -EINVAL;
- nvme_put_ns_head(head);
- goto out_unlock;
+ goto out_put_ns_head;
}
}
list_add_tail(&ns->siblings, &head->list);
ns->head = head;
+ mutex_unlock(&ctrl->subsys->lock);
+ return 0;
+out_put_ns_head:
+ nvme_put_ns_head(head);
out_unlock:
mutex_unlock(&ctrl->subsys->lock);
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
-static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
- return nsa->head->ns_id - nsb->head->ns_id;
-}
-
-static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
@@ -3501,34 +3820,26 @@
up_read(&ctrl->namespaces_rwsem);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
-static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
- struct streams_directive_params s;
- int ret;
+ struct nvme_ns *tmp;
- if (!ctrl->nr_streams)
- return 0;
-
- ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
- if (ret)
- return ret;
-
- ns->sws = le32_to_cpu(s.sws);
- ns->sgs = le16_to_cpu(s.sgs);
-
- if (ns->sws) {
- unsigned int bs = 1 << ns->lba_shift;
-
- blk_queue_io_min(ns->queue, bs * ns->sws);
- if (ns->sgs)
- blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+ list_add(&ns->list, &tmp->list);
+ return;
+ }
}
-
- return 0;
+ list_add(&ns->list, &ns->ctrl->namespaces);
}
-static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_ns_ids *ids)
{
struct nvme_ns *ns;
struct gendisk *disk;
@@ -3536,19 +3847,19 @@
char disk_name[DISK_NAME_LEN];
int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
+ if (nvme_identify_ns(ctrl, nsid, ids, &id))
+ return;
+
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- return -ENOMEM;
+ goto out_free_id;
ns->queue = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ns->queue)) {
- ret = PTR_ERR(ns->queue);
+ if (IS_ERR(ns->queue))
goto out_free_ns;
- }
if (ctrl->opts && ctrl->opts->data_digest)
- ns->queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
@@ -3556,33 +3867,16 @@
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
-
kref_init(&ns->kref);
- ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
- blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- nvme_set_queue_limits(ctrl, ns->queue);
-
- ret = nvme_identify_ns(ctrl, nsid, &id);
+ ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED);
if (ret)
goto out_free_queue;
-
- if (id->ncap == 0) {
- ret = -EINVAL;
- goto out_free_id;
- }
-
- ret = nvme_init_ns_head(ns, nsid, id);
- if (ret)
- goto out_free_id;
- nvme_setup_streams_ns(ctrl, ns);
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
disk = alloc_disk_node(0, node);
- if (!disk) {
- ret = -ENOMEM;
+ if (!disk)
goto out_unlink_ns;
- }
disk->fops = &nvme_fops;
disk->private_data = ns;
@@ -3591,7 +3885,8 @@
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
ns->disk = disk;
- __nvme_revalidate_disk(disk, id);
+ if (nvme_update_ns_info(ns, id))
+ goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
ret = nvme_nvm_register(ns, disk_name, node);
@@ -3602,9 +3897,8 @@
}
down_write(&ctrl->namespaces_rwsem);
- list_add_tail(&ns->list, &ctrl->namespaces);
+ nvme_ns_add_to_ctrl_list(ns);
up_write(&ctrl->namespaces_rwsem);
-
nvme_get_ctrl(ctrl);
device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
@@ -3613,7 +3907,7 @@
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
kfree(id);
- return 0;
+ return;
out_put_disk:
/* prevent double queue cleanup */
ns->disk->queue = NULL;
@@ -3625,15 +3919,12 @@
list_del_init(&ns->head->entry);
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
- out_free_id:
- kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
out_free_ns:
kfree(ns);
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
- return ret;
+ out_free_id:
+ kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -3641,6 +3932,7 @@
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
mutex_lock(&ns->ctrl->subsys->lock);
@@ -3653,7 +3945,7 @@
nvme_mpath_clear_current_path(ns);
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
- if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
+ if (ns->disk->flags & GENHD_FL_UP) {
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -3668,17 +3960,91 @@
nvme_put_ns(ns);
}
-static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
+ struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
+
+ if (ns) {
+ nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
+}
+
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+{
+ struct nvme_id_ns *id;
+ int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ if (test_bit(NVME_NS_DEAD, &ns->flags))
+ goto out;
+
+ ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
+ if (ret)
+ goto out;
+
+ ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ dev_err(ns->ctrl->device,
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
+ goto out_free_id;
+ }
+
+ ret = nvme_update_ns_info(ns, id);
+
+out_free_id:
+ kfree(id);
+out:
+ /*
+ * Only remove the namespace if we got a fatal error back from the
+ * device, otherwise ignore the error and just move on.
+ *
+ * TODO: we should probably schedule a delayed retry here.
+ */
+ if (ret > 0 && (ret & NVME_SC_DNR))
+ nvme_ns_remove(ns);
+ else
+ revalidate_disk_size(ns->disk, true);
+}
+
+static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+{
+ struct nvme_ns_ids ids = { };
struct nvme_ns *ns;
+ if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ return;
+
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (ns->disk && revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
+ nvme_validate_ns(ns, &ids);
nvme_put_ns(ns);
- } else
- nvme_alloc_ns(ctrl, nsid);
+ return;
+ }
+
+ switch (ids.csi) {
+ case NVME_CSI_NVM:
+ nvme_alloc_ns(ctrl, nsid, &ids);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_warn(ctrl->device,
+ "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ nsid);
+ break;
+ }
+ if (!nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n",
+ nsid);
+ break;
+ }
+ nvme_alloc_ns(ctrl, nsid, &ids);
+ break;
+ default:
+ dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
+ ids.csi, nsid);
+ break;
+ }
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -3699,39 +4065,41 @@
}
-static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0;
- unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
- int ret = 0;
+ u32 prev = 0;
+ int ret = 0, i;
+
+ if (nvme_ctrl_limited_cns(ctrl))
+ return -EOPNOTSUPP;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
- for (i = 0; i < num_lists; i++) {
- ret = nvme_identify_ns_list(ctrl, prev, ns_list);
+ for (;;) {
+ struct nvme_command cmd = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(prev),
+ };
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+ NVME_IDENTIFY_DATA_SIZE);
if (ret)
goto free;
- for (j = 0; j < min(nn, 1024U); j++) {
- nsid = le32_to_cpu(ns_list[j]);
- if (!nsid)
+ for (i = 0; i < nr_entries; i++) {
+ u32 nsid = le32_to_cpu(ns_list[i]);
+
+ if (!nsid) /* end of the list? */
goto out;
-
- nvme_validate_ns(ctrl, nsid);
-
- while (++prev < nsid) {
- ns = nvme_find_get_ns(ctrl, prev);
- if (ns) {
- nvme_ns_remove(ns);
- nvme_put_ns(ns);
- }
- }
+ nvme_validate_or_alloc_ns(ctrl, nsid);
+ while (++prev < nsid)
+ nvme_ns_remove_by_nsid(ctrl, prev);
}
- nn -= j;
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
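As a concrete walk-through of the rewritten loop: if the controller's active namespace list is {1, 2, 5}, a single Identify pass with CNS NVME_ID_CNS_NS_ACTIVE_LIST returns [1, 2, 5, 0, ...]; nvme_validate_or_alloc_ns() runs for NSIDs 1, 2 and 5, the inner while (++prev < nsid) loop calls nvme_ns_remove_by_nsid() for the now-absent NSIDs 3 and 4, the zero entry terminates the scan, and nvme_remove_invalid_namespaces(ctrl, prev) finally drops anything the controller no longer reports above the last NSID seen.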
@@ -3740,12 +4108,18 @@
return ret;
}
-static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
- unsigned i;
+ struct nvme_id_ctrl *id;
+ u32 nn, i;
+
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+ nn = le32_to_cpu(id->nn);
+ kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_ns(ctrl, i);
+ nvme_validate_or_alloc_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -3766,8 +4140,8 @@
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
- log_size, 0);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
+ NVME_CSI_NVM, log, log_size, 0);
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
@@ -3779,8 +4153,6 @@
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, scan_work);
- struct nvme_id_ctrl *id;
- unsigned nn;
/* No tagset on a live ctrl means IO queues could not created */
if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
@@ -3791,22 +4163,10 @@
nvme_clear_changed_ns_log(ctrl);
}
- if (nvme_identify_ctrl(ctrl, &id))
- return;
-
mutex_lock(&ctrl->scan_lock);
- nn = le32_to_cpu(id->nn);
- if (!nvme_ctrl_limited_cns(ctrl)) {
- if (!nvme_scan_ns_list(ctrl, nn))
- goto out_free_id;
- }
- nvme_scan_ns_sequential(ctrl, nn);
-out_free_id:
+ if (nvme_scan_ns_list(ctrl) != 0)
+ nvme_scan_ns_sequential(ctrl);
mutex_unlock(&ctrl->scan_lock);
- kfree(id);
- down_write(&ctrl->namespaces_rwsem);
- list_sort(NULL, &ctrl->namespaces, ns_cmp);
- up_write(&ctrl->namespaces_rwsem);
}
/*
@@ -3838,6 +4198,9 @@
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
+ /* this is a no-op when called from the controller reset handler */
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
+
down_write(&ctrl->namespaces_rwsem);
list_splice_init(&ctrl->namespaces, &ns_list);
up_write(&ctrl->namespaces_rwsem);
@@ -3896,7 +4259,14 @@
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
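The comment added above imposes an ordering requirement on every transport during teardown. A minimal sketch of the sequence it describes, assuming a hypothetical teardown helper (the DELETING state value and the admin-queue free step are assumptions for illustration, not taken from this patch):

	static void example_transport_teardown(struct nvme_ctrl *ctrl)
	{
		/* leave LIVE first so the AER work above refuses to resubmit */
		nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING);
		/* wait out any async_event_work already running */
		flush_work(&ctrl->async_event_work);
		/* only now is it safe to tear down and free the admin queue */
		example_free_admin_queue(ctrl);		/* hypothetical helper */
	}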
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -3921,8 +4291,8 @@
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
- sizeof(*log), 0))
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
+ log, sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
}
@@ -4032,8 +4402,7 @@
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- if (ctrl->kato)
- nvme_start_keep_alive(ctrl);
+ nvme_start_keep_alive(ctrl);
nvme_enable_aen(ctrl);
@@ -4041,7 +4410,6 @@
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
- ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -4050,9 +4418,23 @@
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
+ nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
+static void nvme_free_cels(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *cel;
+ unsigned long i;
+
+ xa_for_each (&ctrl->cels, i, cel) {
+ xa_erase(&ctrl->cels, i);
+ kfree(cel);
+ }
+
+ xa_destroy(&ctrl->cels);
+}
+
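For context, a hedged sketch of the store side that pairs with nvme_free_cels() above; it is not part of this hunk, and the choice of key (the command set identifier) and GFP flags are assumptions made for illustration:

	static int example_cache_effects_log(struct nvme_ctrl *ctrl, u8 csi,
					     struct nvme_effects_log *cel)
	{
		/* one cached Commands Supported and Effects log per CSI */
		return xa_insert(&ctrl->cels, csi, cel, GFP_KERNEL);
	}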
static void nvme_free_ctrl(struct device *dev)
{
struct nvme_ctrl *ctrl =
@@ -4062,7 +4444,7 @@
if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
- kfree(ctrl->effects);
+ nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
__free_page(ctrl->discard_page);
@@ -4093,10 +4475,12 @@
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
+ xa_init(&ctrl->cels);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ ctrl->numa_node = NUMA_NO_NODE;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
@@ -4276,6 +4660,14 @@
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
+{
+ if (file->f_op != &nvme_dev_fops)
+ return NULL;
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
+
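A hedged sketch of how a consumer in the NVME_TARGET_PASSTHRU symbol namespace might use the new helper; the wrapper and its error handling are illustrative assumptions, not code from this patch:

	MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

	static struct nvme_ctrl *example_ctrl_from_fd(struct file *filp)
	{
		struct nvme_ctrl *ctrl = nvme_ctrl_from_file(filp);

		if (!ctrl)		/* not an NVMe controller char device */
			return NULL;
		nvme_get_ctrl(ctrl);	/* pin it for the caller */
		return ctrl;
	}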
/*
* Check we didn't inadvertently grow the command structure sizes:
*/
@@ -4294,6 +4686,8 @@
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
@@ -4362,6 +4756,7 @@
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
+ ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index d884187..7015fba 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -105,14 +105,14 @@
int len = 0;
if (ctrl->opts->mask & NVMF_OPT_TRADDR)
- len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
- len += snprintf(buf + len, size - len, "%strsvcid=%s",
+ len += scnprintf(buf + len, size - len, "%strsvcid=%s",
(len) ? "," : "", ctrl->opts->trsvcid);
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
- len += snprintf(buf + len, size - len, "%shost_traddr=%s",
+ len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
- len += snprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
@@ -552,7 +552,7 @@
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
- if (ctrl->state != NVME_CTRL_DELETING &&
+ if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a0ec40a..78467cb 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -153,6 +153,7 @@
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
@@ -182,7 +183,8 @@
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ if (likely(ctrl->state == NVME_CTRL_LIVE ||
+ ctrl->state == NVME_CTRL_DELETING))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0d2c22c..906cab3 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -14,6 +14,7 @@
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "fc.h"
#include <scsi/scsi_transport_fc.h>
/* *************************** Data Structures/Defines ****************** */
@@ -25,6 +26,10 @@
};
#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+#define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects
+ * when connected and a
+ * connection failure.
+ */
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
@@ -61,6 +66,17 @@
bool req_queued;
};
+struct nvmefc_ls_rcv_op {
+ struct nvme_fc_rport *rport;
+ struct nvmefc_ls_rsp *lsrsp;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
+ u16 rqstdatalen;
+ bool handled;
+ dma_addr_t rspdma;
+ struct list_head lsrcv_list; /* rport->ls_rcv_list */
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
enum nvme_fcpop_state {
FCPOP_STATE_UNINIT = 0,
FCPOP_STATE_IDLE = 1,
@@ -95,8 +111,8 @@
struct nvme_fcp_op_w_sgl {
struct nvme_fc_fcp_op op;
- struct scatterlist sgl[SG_CHUNK_SIZE];
- uint8_t priv[0];
+ struct scatterlist sgl[NVME_INLINE_SG_CNT];
+ uint8_t priv[];
};
struct nvme_fc_lport {
@@ -117,6 +133,7 @@
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
+ struct list_head ls_rcv_list;
struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
@@ -124,11 +141,13 @@
struct kref ref;
atomic_t act_ctrl_cnt;
unsigned long dev_loss_end;
+ struct work_struct lsrcv_work;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
-enum nvme_fcctrl_flags {
- FCCTRL_TERMIO = (1 << 0),
-};
+/* fc_ctrl flags values - specified as bit positions */
+#define ASSOC_ACTIVE 0
+#define ASSOC_FAILED 1
+#define FCCTRL_TERMIO 2
struct nvme_fc_ctrl {
spinlock_t lock;
@@ -139,20 +158,19 @@
u32 cnum;
bool ioq_live;
- bool assoc_active;
- atomic_t err_work_active;
u64 association_id;
+ struct nvmefc_ls_rcv_op *rcv_disconn;
struct list_head ctrl_list; /* rport->ctrl_list */
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
- struct work_struct err_work;
struct kref ref;
- u32 flags;
+ unsigned long flags;
u32 iocnt;
wait_queue_head_t ioabort_wait;
@@ -213,12 +231,16 @@
*/
static struct device *fc_udev_device;
+static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
+static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
+
+
static void
nvme_fc_free_lport(struct kref *ref)
{
@@ -394,7 +416,10 @@
newrec->ops = template;
newrec->dev = dev;
ida_init(&newrec->endp_cnt);
- newrec->localport.private = &newrec[1];
+ if (template->local_priv_sz)
+ newrec->localport.private = &newrec[1];
+ else
+ newrec->localport.private = NULL;
newrec->localport.node_name = pinfo->node_name;
newrec->localport.port_name = pinfo->port_name;
newrec->localport.port_role = pinfo->port_role;
@@ -701,9 +726,13 @@
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
newrec->dev = lport->dev;
newrec->lport = lport;
- newrec->remoteport.private = &newrec[1];
+ if (lport->ops->remote_priv_sz)
+ newrec->remoteport.private = &newrec[1];
+ else
+ newrec->remoteport.private = NULL;
newrec->remoteport.port_role = pinfo->port_role;
newrec->remoteport.node_name = pinfo->node_name;
newrec->remoteport.port_name = pinfo->port_name;
@@ -711,6 +740,7 @@
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
@@ -800,6 +830,7 @@
break;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
default:
/* no action to take - let it delete */
break;
@@ -1000,6 +1031,7 @@
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
@@ -1140,41 +1172,6 @@
return __nvme_fc_send_ls_req(rport, lsop, done);
}
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_LSACC = 1,
- VERR_LSDESC_RQST = 2,
- VERR_LSDESC_RQST_LEN = 3,
- VERR_ASSOC_ID = 4,
- VERR_ASSOC_ID_LEN = 5,
- VERR_CONN_ID = 6,
- VERR_CONN_ID_LEN = 7,
- VERR_CR_ASSOC = 8,
- VERR_CR_ASSOC_ACC_LEN = 9,
- VERR_CR_CONN = 10,
- VERR_CR_CONN_ACC_LEN = 11,
- VERR_DISCONN = 12,
- VERR_DISCONN_ACC_LEN = 13,
-};
-
-static char *validation_errors[] = {
- "OK",
- "Not LS_ACC",
- "Not LSDESC_RQST",
- "Bad LSDESC_RQST Length",
- "Not Association ID",
- "Bad Association ID Length",
- "Not Connection ID",
- "Bad Connection ID Length",
- "Not CR_ASSOC Rqst",
- "Bad CR_ASSOC ACC Length",
- "Not CR_CONN Rqst",
- "Bad CR_CONN ACC Length",
- "Not Disconnect Rqst",
- "Bad Disconnect ACC Length",
-};
-
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
@@ -1183,21 +1180,27 @@
struct nvmefc_ls_req *lsreq;
struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ unsigned long flags;
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = &assoc_acc[1];
+ else
+ lsreq->private = NULL;
assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
assoc_rqst->desc_list_len =
@@ -1224,7 +1227,7 @@
lsreq->rqstlen = sizeof(*assoc_rqst);
lsreq->rspaddr = assoc_acc;
lsreq->rsplen = sizeof(*assoc_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1264,14 +1267,16 @@
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id =
be64_to_cpu(assoc_acc->associd.association_id);
queue->connection_id =
be64_to_cpu(assoc_acc->connectid.connection_id);
set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
}
out_free_buffer:
@@ -1295,18 +1300,23 @@
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+ sizeof(*conn_rqst) + sizeof(*conn_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&conn_acc[1];
+ else
+ lsreq->private = NULL;
conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
conn_rqst->desc_list_len = cpu_to_be32(
@@ -1332,7 +1342,7 @@
lsreq->rqstlen = sizeof(*conn_rqst);
lsreq->rspaddr = conn_acc;
lsreq->rsplen = sizeof(*conn_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
if (ret)
@@ -1363,7 +1373,7 @@
if (fcret) {
ret = -EBADF;
dev_err(ctrl->dev,
- "q %d connect failed: %s\n",
+ "q %d Create I/O Connection LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
queue->connection_id =
@@ -1376,7 +1386,7 @@
out_no_memory:
if (ret)
dev_err(ctrl->dev,
- "queue %d connect command failed (%d).\n",
+ "queue %d connect I/O queue failed (%d).\n",
queue->qnum, ret);
return ret;
}
@@ -1413,67 +1423,393 @@
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
- struct fcnvme_ls_disconnect_rqst *discon_rqst;
- struct fcnvme_ls_disconnect_acc *discon_acc;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
struct nvmefc_ls_req_op *lsop;
struct nvmefc_ls_req *lsreq;
int ret;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*discon_rqst) + sizeof(*discon_acc)),
- GFP_KERNEL);
- if (!lsop)
- /* couldn't sent it... too bad */
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Disconnect Association "
+ "failed: ENOMEM\n",
+ ctrl->cnum);
return;
+ }
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
- lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
-
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
- discon_rqst->desc_list_len = cpu_to_be32(
- sizeof(struct fcnvme_lsdesc_assoc_id) +
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
-
- discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
- discon_rqst->associd.desc_len =
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id));
-
- discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
-
- discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
- FCNVME_LSDESC_DISCONN_CMD);
- discon_rqst->discon_cmd.desc_len =
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
- discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
- discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
-
- lsreq->rqstaddr = discon_rqst;
- lsreq->rqstlen = sizeof(*discon_rqst);
- lsreq->rspaddr = discon_acc;
- lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ ctrl->association_id);
ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
nvme_fc_disconnect_assoc_done);
if (ret)
kfree(lsop);
-
- /* only meaningful part to terminating the association */
- ctrl->association_id = 0;
}
+static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_del(&lsop->lsrcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ kfree(lsop);
+
+ nvme_fc_rport_put(rport);
+}
+
+static void
+nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ int ret;
+
+ fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
+ lsop->lsrsp);
+ if (ret) {
+ dev_warn(lport->dev,
+ "LLDD rejected LS RSP xmt: LS %d status %d\n",
+ w0->ls_cmd, ret);
+ nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ return;
+ }
+}
+
+static struct nvme_fc_ctrl *
+nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvmefc_ls_rcv_op *oldls = NULL;
+ u64 association_id = be64_to_cpu(rqst->associd.association_id);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ if (!nvme_fc_ctrl_get(ctrl))
+ continue;
+ spin_lock(&ctrl->lock);
+ if (association_id == ctrl->association_id) {
+ oldls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = lsop;
+ ret = ctrl;
+ }
+ spin_unlock(&ctrl->lock);
+ if (ret)
+ /* leave the ctrl get reference */
+ break;
+ nvme_fc_ctrl_put(ctrl);
+ }
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ /* transmit a response for anything that was pending */
+ if (oldls) {
+ dev_info(rport->lport->dev,
+ "NVME-FC{%d}: Multiple Disconnect Association "
+ "LS's received\n", ctrl->cnum);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*oldls->rspbuf),
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvme_fc_xmt_ls_rsp(oldls);
+ }
+
+ return ret;
+}
+
+/*
+ * returns true to mean LS handled and ls_rsp can be sent
+ * returns false to defer ls_rsp xmt (will be done as part of
+ * association termination)
+ */
+static bool
+nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &lsop->rspbuf->rsp_dis_assoc;
+ struct nvme_fc_ctrl *ctrl = NULL;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association */
+ ctrl = nvme_fc_match_disconn_ls(rport, lsop);
+ if (!ctrl)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_info(rport->lport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return true;
+ }
+
+ /* format an ACCept response */
+
+ lsop->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
+
+ /*
+ * the transmit of the response will occur after the exchanges
+ * for the association have been ABTS'd by
+ * nvme_fc_delete_association().
+ */
+
+ /* fail the association */
+ nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
+
+ /* release the reference taken by nvme_fc_match_disconn_ls() */
+ nvme_fc_ctrl_put(ctrl);
+
+ return false;
+}
+
+/*
+ * Actual Processing routine for received FC-NVME LS Requests from the LLD
+ * returns true if a response should be sent afterward, false if rsp will
+ * be sent asynchronously.
+ */
+static bool
+nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ bool ret = true;
+
+ lsop->lsrsp->nvme_fc_private = lsop;
+ lsop->lsrsp->rspbuf = lsop->rspbuf;
+ lsop->lsrsp->rspdma = lsop->rspdma;
+ lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
+ /* Be preventative. handlers will later set to valid length */
+ lsop->lsrsp->rsplen = 0;
+
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_DISCONNECT_ASSOC:
+ ret = nvme_fc_ls_disconnect_assoc(lsop);
+ break;
+ case FCNVME_LS_DISCONNECT_CONN:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ case FCNVME_LS_CREATE_CONNECTION:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ default:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ }
+
+ return(ret);
+}
+
+static void
+nvme_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvme_fc_rport *rport =
+ container_of(work, struct nvme_fc_rport, lsrcv_work);
+ struct fcnvme_ls_rqst_w0 *w0;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ bool sendrsp;
+
+restart:
+ sendrsp = true;
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
+ if (lsop->handled)
+ continue;
+
+ lsop->handled = true;
+ if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ sendrsp = nvme_fc_handle_ls_rqst(lsop);
+ } else {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ w0 = &lsop->rqstbuf->w0;
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(
+ lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ w0->ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ }
+ if (sendrsp)
+ nvme_fc_xmt_ls_rsp(lsop);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+}
+
+/**
+ * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
+ * upon the reception of a NVME LS request.
+ *
+ * The nvme-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @portptr: pointer to the (registered) remote port that the LS

+ * was received from. The remoteport is associated with
+ * a specific localport.
+ * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
+ * used to reference the exchange corresponding to the LS
+ * when issuing an ls response.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ int ret;
+
+ nvme_fc_rport_get(rport);
+
+ /* validate there's a routine to transmit a response */
+ if (!lport->ops->xmt_ls_rsp) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: payload too large\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -E2BIG;
+ goto out_put;
+ }
+
+ lsop = kzalloc(sizeof(*lsop) +
+ sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
+ if (!lsop) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: No memory\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
+ lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
+
+ lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: DMA mapping failure\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ lsop->rport = rport;
+ lsop->lsrsp = lsrsp;
+
+ memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
+ lsop->rqstdatalen = lsreqbuf_len;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ ret = -ENOTCONN;
+ goto out_unmap;
+ }
+ list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ schedule_work(&rport->lsrcv_work);
+
+ return 0;
+
+out_unmap:
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+out_free:
+ kfree(lsop);
+out_put:
+ nvme_fc_rport_put(rport);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
+
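A hedged LLDD-side sketch of the new entry point, following the kernel-doc above; the driver function and the abort helper are hypothetical, and the key point is that the transport copies the payload, so the LLDD may reuse its receive buffer as soon as the call returns:

	static void example_lldd_recv_ls(struct nvme_fc_remote_port *remoteport,
					 struct nvmefc_ls_rsp *lsrsp,
					 void *frame, u32 frame_len)
	{
		if (nvme_fc_rcv_ls_req(remoteport, lsrsp, frame, frame_len))
			example_lldd_abort_exchange(lsrsp);	/* hypothetical */

		/* "frame" may be freed or reused here; the LS was copied */
	}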
/* *********************** NVME Ctrl Routines **************************** */
-static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
-
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_fcp_op *op)
@@ -1505,8 +1841,10 @@
opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
if (opstate != FCPOP_STATE_ACTIVE)
atomic_set(&op->state, opstate);
- else if (ctrl->flags & FCCTRL_TERMIO)
+ else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
+ op->flags |= FCOP_FLAGS_TERMIO;
ctrl->iocnt++;
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (opstate != FCPOP_STATE_ACTIVE)
@@ -1542,7 +1880,8 @@
if (opstate == FCPOP_STATE_ABORTED) {
spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
+ op->flags & FCOP_FLAGS_TERMIO) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
@@ -1551,6 +1890,15 @@
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -1662,7 +2010,7 @@
(freq->rcv_rsplen / 4) ||
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
- op->rsp_iu.status_code ||
+ op->rsp_iu.ersp_result ||
sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device,
@@ -1672,7 +2020,7 @@
ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
be32_to_cpu(op->rsp_iu.xfrd_len),
freq->transferred_length,
- op->rsp_iu.status_code,
+ op->rsp_iu.ersp_result,
sqe->common.command_id,
cqe->command_id);
goto done;
@@ -1703,11 +2051,12 @@
}
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- nvme_end_request(rq, status, result);
+ if (!nvme_try_complete_req(rq, status, result))
+ nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -1731,9 +2080,14 @@
op->rq = rq;
op->rqno = rqno;
- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+ cmdiu->format_id = NVME_CMD_FORMAT_ID;
cmdiu->fc_id = NVME_CMD_FC_ID;
cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+ if (queue->qnum)
+ cmdiu->rsv_cat = fccmnd_set_cat_css(0,
+ (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
+ else
+ cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
@@ -1771,7 +2125,7 @@
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
if (res)
return res;
- op->op.fcp_req.first_sgl = &op->sgl[0];
+ op->op.fcp_req.first_sgl = op->sgl;
op->op.fcp_req.private = &op->priv[0];
nvme_req(rq)->ctrl = &ctrl->ctrl;
return res;
@@ -1783,15 +2137,17 @@
struct nvme_fc_fcp_op *aen_op;
struct nvme_fc_cmd_iu *cmdiu;
struct nvme_command *sqe;
- void *private;
+ void *private = NULL;
int i, ret;
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+ if (ctrl->lport->ops->fcprqst_priv_sz) {
+ private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL);
- if (!private)
- return -ENOMEM;
+ if (!private)
+ return -ENOMEM;
+ }
cmdiu = &aen_op->cmd_iu;
sqe = &cmdiu->sqe;
@@ -1823,9 +2179,6 @@
cancel_work_sync(&ctrl->ctrl.async_event_work);
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- if (!aen_op->fcp_req.private)
- continue;
-
__nvme_fc_exit_request(ctrl, aen_op);
kfree(aen_op->fcp_req.private);
@@ -1977,7 +2330,7 @@
return 0;
delete_queues:
- for (; i >= 0; i--)
+ for (; i > 0; i--)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
return ret;
}
@@ -2070,24 +2423,112 @@
nvme_fc_ctrl_put(ctrl);
}
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
+ * this routine to kill them on a 1 by 1 basis.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+ op->nreq.flags |= NVME_REQ_CANCELLED;
+ __nvme_fc_abort_op(ctrl, op);
+ return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. This routine is typically called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * may have timed out or failed thus the io must be killed for the connect
+ * thread to see the error.
+ */
+static void
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+{
+ int q;
+
+ /*
+ * if aborting io, the queues are no longer good, mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us what io's are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_start_queues(&ctrl->ctrl);
+ }
+
+ /*
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shutdown the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+ * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+ * its still present.
+ */
+
+ /*
+ * clean up the admin queue. Same thing as above.
+ */
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+}
+
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
- int active;
-
/*
- * if an error (io timeout, etc) while (re)connecting,
- * it's an error on creating the new association.
- * Start the error recovery thread if it hasn't already
- * been started. It is expected there could be multiple
- * ios hitting this path before things are cleaned up.
+ * if an error (io timeout, etc) while (re)connecting, the remote
+ * port requested terminating of the association (disconnect_ls)
+ * or an error (timeout or abort) occurred on an io while creating
+ * the controller. Abort any ios on the association and let the
+ * create_association error path resolve things.
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
- active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
- atomic_set(&ctrl->err_work_active, 0);
- WARN_ON(1);
- }
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
return;
}
@@ -2096,7 +2537,7 @@
return;
dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: transport association error detected: %s\n",
+ "NVME-FC{%d}: transport association event: %s\n",
ctrl->cnum, errmsg);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
@@ -2109,15 +2550,20 @@
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
/*
- * we can't individually ABTS an io without affecting the queue,
- * thus killing the queue, and thus the association.
- * So resolve by performing a controller reset, which will stop
- * the host/io stack, terminate the association on the link,
- * and recreate an association on the link.
+ * Attempt to abort the offending command. Command completion
+ * will detect the aborted io and will fail the connection.
*/
- nvme_fc_error_recovery(ctrl, "io timeout error");
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "x%08x/x%08x\n",
+ ctrl->cnum, op->queue->qnum, sqe->common.opcode,
+ sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ if (__nvme_fc_abort_op(ctrl, op))
+ nvme_fc_error_recovery(ctrl, "io timeout abort failed");
/*
* the io abort has been initiated. Have the reset timer
@@ -2142,7 +2588,7 @@
freq->sg_table.sgl = freq->first_sgl;
ret = sg_alloc_table_chained(&freq->sg_table,
blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
- SG_CHUNK_SIZE);
+ NVME_INLINE_SG_CNT);
if (ret)
return -ENOMEM;
@@ -2151,7 +2597,7 @@
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, rq_dma_dir(rq));
if (unlikely(freq->sg_cnt <= 0)) {
- sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
freq->sg_cnt = 0;
return -EFAULT;
}
@@ -2174,9 +2620,7 @@
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
rq_dma_dir(rq));
- nvme_cleanup_cmd(rq);
-
- sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
freq->sg_cnt = 0;
}
@@ -2303,8 +2747,10 @@
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!(op->flags & FCOP_FLAGS_AEN))
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
nvme_fc_ctrl_put(ctrl);
@@ -2368,16 +2814,9 @@
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
- unsigned long flags;
- bool terminating = false;
blk_status_t ret;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- terminating = true;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- if (terminating)
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
return;
aen_op = &ctrl->aen_ops[0];
@@ -2396,36 +2835,13 @@
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags &= ~FCOP_FLAGS_TERMIO;
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
}
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
-{
- struct nvme_ctrl *nctrl = data;
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-
- __nvme_fc_abort_op(ctrl, op);
- return true;
-}
-
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2538,6 +2954,15 @@
if (ctrl->ctrl.queue_count == 1)
return 0;
+ if (prior_ioq_cnt != nr_io_queues) {
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
+ nvme_wait_freeze(&ctrl->ctrl);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ nvme_unfreeze(&ctrl->ctrl);
+ }
+
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
@@ -2546,12 +2971,6 @@
if (ret)
goto out_delete_hw_queues;
- if (prior_ioq_cnt != nr_io_queues)
- dev_info(ctrl->ctrl.device,
- "reconnect: revising io queue count from %d to %d\n",
- prior_ioq_cnt, nr_io_queues);
- blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-
return 0;
out_delete_hw_queues:
@@ -2586,10 +3005,9 @@
struct nvme_fc_rport *rport = ctrl->rport;
u32 cnt;
- if (ctrl->assoc_active)
+ if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
return 1;
- ctrl->assoc_active = true;
cnt = atomic_inc_return(&rport->act_ctrl_cnt);
if (cnt == 1)
nvme_fc_rport_active_on_lport(rport);
@@ -2604,7 +3022,7 @@
struct nvme_fc_lport *lport = rport->lport;
u32 cnt;
- /* ctrl->assoc_active=false will be set independently */
+ /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
cnt = atomic_dec_return(&rport->act_ctrl_cnt);
if (cnt == 0) {
@@ -2624,6 +3042,8 @@
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct nvmefc_ls_rcv_op *disls = NULL;
+ unsigned long flags;
int ret;
bool changed;
@@ -2641,6 +3061,8 @@
ctrl->cnum, ctrl->lport->localport.port_name,
ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+ clear_bit(ASSOC_FAILED, &ctrl->flags);
+
/*
* Create the admin queue
*/
@@ -2669,16 +3091,17 @@
*/
ret = nvme_enable_ctrl(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
- ctrl->ctrl.max_hw_sectors =
- (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
+ ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
+ ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
+ (ilog2(SZ_4K) - 9);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
ret = nvme_init_identify(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
/* sanity checks */
@@ -2696,7 +3119,7 @@
/* warn if maxcmd is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl maxcmd %u, reducing "
- "to queue_size\n",
+ "to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
@@ -2704,7 +3127,8 @@
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ "queue_size %zu > ctrl sqsize %u, reducing "
+ "to sqsize\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
@@ -2722,9 +3146,9 @@
ret = nvme_fc_create_io_queues(ctrl);
else
ret = nvme_fc_recreate_io_queues(ctrl);
- if (ret)
- goto out_term_aen_ops;
}
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+ goto out_term_aen_ops;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -2740,16 +3164,24 @@
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
nvme_fc_free_queue(&ctrl->queues[0]);
- ctrl->assoc_active = false;
+ clear_bit(ASSOC_ACTIVE, &ctrl->flags);
nvme_fc_ctlr_inactive_on_rport(ctrl);
return ret;
}
+
/*
* This routine stops operation of the controller on the host side.
* On the host os stack side: Admin and IO queues are stopped,
@@ -2759,57 +3191,18 @@
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
+ struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
- if (!ctrl->assoc_active)
+ if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
return;
- ctrl->assoc_active = false;
spin_lock_irqsave(&ctrl->lock, flags);
- ctrl->flags |= FCCTRL_TERMIO;
+ set_bit(FCCTRL_TERMIO, &ctrl->flags);
ctrl->iocnt = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
- /*
- * If io queues are present, stop them and terminate all outstanding
- * ios on them. As FC allocates FC exchange for each io, the
- * transport must contact the LLDD to terminate the exchange,
- * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
- * to tell us what io's are busy and invoke a transport routine
- * to kill them with the LLDD. After terminating the exchange
- * the LLDD will call the transport's normal io done path, but it
- * will have an aborted status. The done path will return the
- * io requests back to the block layer as part of normal completions
- * (but with error status).
- */
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
- }
-
- /*
- * Other transports, which don't have link-level contexts bound
- * to sqe's, would try to gracefully shutdown the controller by
- * writing the registers for shutdown and polling (call
- * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
- * just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
- * link, don't send more io to create more contexts for the
- * shutdown. Let the controller fail via keepalive failure if
- * its still present.
- */
-
- /*
- * clean up the admin queue. Same thing as above.
- * use blk_mq_tagset_busy_itr() and the transport routine to
- * terminate the exchanges.
- */
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ __nvme_fc_abort_outstanding_ios(ctrl, false);
/* kill the aens as they are a separate path */
nvme_fc_abort_aen_ops(ctrl);
@@ -2817,7 +3210,7 @@
/* wait for all io that had to be aborted */
spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
- ctrl->flags &= ~FCCTRL_TERMIO;
+ clear_bit(FCCTRL_TERMIO, &ctrl->flags);
spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -2831,6 +3224,18 @@
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ /*
+ * if a Disconnect Request was waiting for a response, send
+ * now that all ABTS's have been issued (and are complete).
+ */
+ nvme_fc_xmt_ls_rsp(disls);
+
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
nvme_fc_free_io_queues(ctrl);
@@ -2853,7 +3258,7 @@
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -2900,79 +3305,42 @@
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n",
- ctrl->cnum, portptr->dev_loss_tmo);
+ ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
+ (ctrl->ctrl.opts->max_reconnects *
+ ctrl->ctrl.opts->reconnect_delay)));
WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
}
}
static void
-__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
-{
- /*
- * if state is connecting - the error occurred as part of a
- * reconnect attempt. The create_association error paths will
- * clean up any outstanding io.
- *
- * if it's a different state - ensure all pending io is
- * terminated. Given this can delay while waiting for the
- * aborted io to return, we recheck adapter state below
- * before changing state.
- */
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
- nvme_stop_keep_alive(&ctrl->ctrl);
-
- /* will block will waiting for io to terminate */
- nvme_fc_delete_association(ctrl);
- }
-
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Couldn't change state "
- "to CONNECTING\n", ctrl->cnum);
-}
-
-static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
-
- __nvme_fc_terminate_io(ctrl);
nvme_stop_ctrl(&ctrl->ctrl);
- if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
- ret = nvme_fc_create_association(ctrl);
- else
- ret = -ENOTCONN;
+ /* will block while waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
- if (ret)
- nvme_fc_reconnect_or_delete(ctrl, ret);
- else
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n",
- ctrl->cnum);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to CONNECTING\n", ctrl->cnum);
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule connect "
+ "after reset\n", ctrl->cnum);
+ } else {
+ flush_delayed_work(&ctrl->connect_work);
+ }
+ } else {
+ nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+ }
}
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, err_work);
-
- __nvme_fc_terminate_io(ctrl);
-
- atomic_set(&ctrl->err_work_active, 0);
-
- /*
- * Rescheduling the connection after recovering
- * from the io error is left to the reconnect work
- * item, which is what should have stalled waiting on
- * the io that had the error that scheduled this work.
- */
-}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
@@ -3049,7 +3417,7 @@
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, ctrl_loss_tmo;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3075,6 +3443,19 @@
goto out_free_ctrl;
}
+ /*
+ * if ctrl_loss_tmo is being enforced and the default reconnect delay
+ * is being used, change to a shorter reconnect delay for FC.
+ */
+ if (opts->max_reconnects != -1 &&
+ opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
+ opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
+ ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
+ opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ }
+
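To make the rescaling concrete: assuming the fabrics default reconnect_delay of 10 seconds and a ctrl_loss_tmo of 600 seconds (max_reconnects = 60 on entry), this branch drops reconnect_delay to NVME_FC_DEFAULT_RECONNECT_TMO (2 seconds) and recomputes max_reconnects = DIV_ROUND_UP(600, 2) = 300, so the overall controller-loss window is unchanged while FC retries five times as often within it.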
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
if (lport->dev)
@@ -3087,8 +3468,6 @@
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->ioq_live = false;
- ctrl->assoc_active = false;
- atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3096,7 +3475,7 @@
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
- INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3188,15 +3567,14 @@
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
/* Remove core ctrl ref. */
nvme_put_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h
new file mode 100644
index 0000000..05ce566
--- /dev/null
+++ b/drivers/nvme/host/fc.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016, Avago Technologies
+ */
+
+#ifndef _NVME_FC_TRANSPORT_H
+#define _NVME_FC_TRANSPORT_H 1
+
+
+/*
+ * Common definitions between the nvme_fc (host) transport and
+ * nvmet_fc (target) transport implementation.
+ */
+
+/*
+ * ****************** FC-NVME LS HANDLING ******************
+ */
+
+union nvmefc_ls_requests {
+ struct fcnvme_ls_rqst_w0 w0;
+ struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc;
+ struct fcnvme_ls_cr_conn_rqst rq_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+union nvmefc_ls_responses {
+ struct fcnvme_ls_rjt rsp_rjt;
+ struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc;
+ struct fcnvme_ls_cr_conn_acc rsp_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+static inline void
+nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static inline int
+nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvme_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_INVAL_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+ VERR_LSACC = 29,
+ VERR_LSDESC_RQST = 30,
+ VERR_LSDESC_RQST_LEN = 31,
+ VERR_CR_ASSOC = 32,
+ VERR_CR_ASSOC_ACC_LEN = 33,
+ VERR_CR_CONN = 34,
+ VERR_CR_CONN_ACC_LEN = 35,
+ VERR_DISCONN = 36,
+ VERR_DISCONN_ACC_LEN = 37,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Invalid Connection ID",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+#define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN
+
+static char *nvmefc_ls_names[] = {
+ "Reserved (0)",
+ "RJT (1)",
+ "ACC (2)",
+ "Create Association",
+ "Create Connection",
+ "Disconnect Association",
+ "Disconnect Connection",
+};
+
+static inline void
+nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq,
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst,
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc,
+ u64 association_id)
+{
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+}
+
+static inline int
+nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen,
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst)
+{
+ int ret = 0;
+
+ if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ /*
+ * As the standard changed on the LS, check if old format and scope
+ * something other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
+ ret = VERR_DISCONN_SCOPE;
+
+ return ret;
+}
+
+#endif /* _NVME_FC_TRANSPORT_H */
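Editor's note: the helpers above return an index into validation_errors[], keeping log messages consistent between the host and target transports. A minimal usage sketch, not part of the patch (the function name and buffer arguments are hypothetical; everything else comes from this header):

static void example_validate_disconnect(void *rqstbuf, u32 rqstlen)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst = rqstbuf;
	int verr;

	/* returns 0 or a VERR_* index into validation_errors[] */
	verr = nvmefc_vldt_lsreq_discon_assoc(rqstlen, rqst);
	if (verr)
		pr_warn("Disconnect Association LS rejected: %s\n",
			validation_errors[verr]);
}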
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
new file mode 100644
index 0000000..552dbc0
--- /dev/null
+++ b/drivers/nvme/host/hwmon.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express hardware monitoring support
+ * Copyright (c) 2019, Guenter Roeck
+ */
+
+#include <linux/hwmon.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
+
+#include "nvme.h"
+
+struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+ struct nvme_smart_log log;
+ struct mutex read_lock;
+};
+
+static int nvme_get_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long *temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ u32 status;
+ int ret;
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ &status);
+ if (ret > 0)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+ *temp = kelvin_to_millicelsius(status & NVME_TEMP_THRESH_MASK);
+
+ return 0;
+}
+
+static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ long temp)
+{
+ unsigned int threshold = sensor << NVME_TEMP_THRESH_SELECT_SHIFT;
+ int ret;
+
+ temp = millicelsius_to_kelvin(temp);
+ threshold |= clamp_val(temp, 0, NVME_TEMP_THRESH_MASK);
+
+ if (under)
+ threshold |= NVME_TEMP_THRESH_TYPE_UNDER;
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_TEMP_THRESH, threshold, NULL, 0,
+ NULL);
+ if (ret > 0)
+ return -EIO;
+
+ return ret;
+}
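Editor's note: a usage sketch for the threshold helpers, not part of the patch (the wrapper function is hypothetical and the controller pointer is assumed valid): arming an 85 °C over-temperature threshold on the composite sensor. 85000 m°C rounds to 358 K, which is what lands in the low 16 bits of the Set Features value.

static int example_arm_overtemp(struct nvme_ctrl *ctrl)
{
	/* channel 0 = composite sensor, under = false -> over-temperature */
	return nvme_set_temp_thresh(ctrl, 0, false, 85000);
}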
+
+static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+{
+ return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+}
+
+static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+ struct nvme_smart_log *log = &data->log;
+ int temp;
+ int err;
+
+ /*
+ * First handle attributes which don't require us to read
+ * the smart log.
+ */
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_get_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_get_temp_thresh(data->ctrl, channel, true, val);
+ case hwmon_temp_crit:
+ *val = kelvin_to_millicelsius(data->ctrl->cctemp);
+ return 0;
+ default:
+ break;
+ }
+
+ mutex_lock(&data->read_lock);
+ err = nvme_hwmon_get_smart_log(data);
+ if (err)
+ goto unlock;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!channel)
+ temp = get_unaligned_le16(log->temperature);
+ else
+ temp = le16_to_cpu(log->temp_sensor[channel - 1]);
+ *val = kelvin_to_millicelsius(temp);
+ break;
+ case hwmon_temp_alarm:
+ *val = !!(log->critical_warning & NVME_SMART_CRIT_TEMPERATURE);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+unlock:
+ mutex_unlock(&data->read_lock);
+ return err;
+}
+
+static int nvme_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ return nvme_set_temp_thresh(data->ctrl, channel, false, val);
+ case hwmon_temp_min:
+ return nvme_set_temp_thresh(data->ctrl, channel, true, val);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const char * const nvme_hwmon_sensor_names[] = {
+ "Composite",
+ "Sensor 1",
+ "Sensor 2",
+ "Sensor 3",
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8",
+};
+
+static int nvme_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ *str = nvme_hwmon_sensor_names[channel];
+ return 0;
+}
+
+static umode_t nvme_hwmon_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct nvme_hwmon_data *data = _data;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ if (!channel && data->ctrl->cctemp)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+ (channel && data->log.temp_sensor[channel - 1])) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+ return 0644;
+ }
+ break;
+ case hwmon_temp_alarm:
+ if (!channel)
+ return 0444;
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ if (!channel || data->log.temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_CRIT | HWMON_T_LABEL | HWMON_T_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops nvme_hwmon_ops = {
+ .is_visible = nvme_hwmon_is_visible,
+ .read = nvme_hwmon_read,
+ .read_string = nvme_hwmon_read_string,
+ .write = nvme_hwmon_write,
+};
+
+static const struct hwmon_chip_info nvme_hwmon_chip_info = {
+ .ops = &nvme_hwmon_ops,
+ .info = nvme_hwmon_info,
+};
+
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ struct nvme_hwmon_data *data;
+ struct device *hwmon;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(ctrl->device,
+ "Failed to read smart log (error %d)\n", err);
+ devm_kfree(dev, data);
+ return err;
+ }
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
+ &nvme_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ devm_kfree(dev, data);
+ }
+
+ return 0;
+}
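Editor's note, for orientation only: once registered, the hwmon core exposes the composite temperature as temp1_input (in millidegrees Celsius), with temp1_max and temp1_min backed by the threshold helpers above, temp1_crit reporting CCTEMP, and the optional per-sensor channels following as temp2 through temp9.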
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ec46693..8e562d0 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -171,7 +171,7 @@
__le32 tdresv;
__le32 thresv;
__le32 rsvd2[8];
- __u8 blk[0];
+ __u8 blk[];
};
struct nvme_nvm_id20_addrf {
@@ -593,8 +593,8 @@
dev_meta_off = dev_meta;
ret = nvme_get_log(ctrl, ns->head->ns_id,
- NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
- offset);
+ NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
+ dev_meta, len, offset);
if (ret) {
dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
break;
@@ -961,7 +961,10 @@
geo = &dev->geo;
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
- geo->ext = ns->ext;
+ if (ns->features & NVME_NS_EXT_LBAS)
+ geo->ext = true;
+ else
+ geo->ext = false;
geo->mdts = ns->ctrl->max_hw_sectors;
dev->q = q;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 590b040..18a7564 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -65,51 +65,30 @@
}
}
-bool nvme_failover_req(struct request *req)
+void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status;
+ u16 status = nvme_req(req)->status & 0x7ff;
unsigned long flags;
- switch (status & 0x7ff) {
- case NVME_SC_ANA_TRANSITION:
- case NVME_SC_ANA_INACCESSIBLE:
- case NVME_SC_ANA_PERSISTENT_LOSS:
- /*
- * If we got back an ANA error we know the controller is alive,
- * but not ready to serve this namespaces. The spec suggests
- * we should update our general state here, but due to the fact
- * that the admin and I/O queues are not serialized that is
- * fundamentally racy. So instead just clear the current path,
- * mark the the path as pending and kick of a re-read of the ANA
- * log page ASAP.
- */
- nvme_mpath_clear_current_path(ns);
- if (ns->ctrl->ana_log_buf) {
- set_bit(NVME_NS_ANA_PENDING, &ns->flags);
- queue_work(nvme_wq, &ns->ctrl->ana_work);
- }
- break;
- case NVME_SC_HOST_PATH_ERROR:
- case NVME_SC_HOST_ABORTED_CMD:
- /*
- * Temporary transport disruption in talking to the controller.
- * Try to send on a new path.
- */
- nvme_mpath_clear_current_path(ns);
- break;
- default:
- /* This was a non-ANA error so follow the normal error path. */
- return false;
+ nvme_mpath_clear_current_path(ns);
+
+ /*
+ * If we got back an ANA error, we know the controller is alive but not
+ * ready to serve this namespace. Kick off a re-read of the ANA

+ * information page, and just try any other available path for now.
+ */
+ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ blk_mq_end_request(req, 0);
kblockd_schedule_work(&ns->head->requeue_work);
- return true;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
@@ -156,20 +135,28 @@
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (nvme_mpath_clear_current_path(ns))
- kblockd_schedule_work(&ns->head->requeue_work);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
up_read(&ctrl->namespaces_rwsem);
- mutex_unlock(&ctrl->scan_lock);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
- return ns->ctrl->state != NVME_CTRL_LIVE ||
- test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags);
+ /*
+ * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
+ * still be able to complete assuming that the controller is connected.
+ * Otherwise it will fail immediately and return to the requeue list.
+ */
+ if (ns->ctrl->state != NVME_CTRL_LIVE &&
+ ns->ctrl->state != NVME_CTRL_DELETING)
+ return true;
+ if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
+ test_bit(NVME_NS_REMOVING, &ns->flags))
+ return true;
+ return false;
}
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
@@ -224,7 +211,7 @@
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
int node, struct nvme_ns *old)
{
- struct nvme_ns *ns, *found, *fallback = NULL;
+ struct nvme_ns *ns, *found = NULL;
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
@@ -243,7 +230,7 @@
goto out;
}
if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
- fallback = ns;
+ found = ns;
}
/*
@@ -254,12 +241,11 @@
*/
if (!nvme_path_is_disabled(old) &&
(old->ana_state == NVME_ANA_OPTIMIZED ||
- (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED)))
+ (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
return old;
- if (!fallback)
+ if (!found)
return NULL;
- found = fallback;
out:
rcu_assign_pointer(head->current_path[node], found);
return found;
@@ -305,22 +291,20 @@
return false;
}
-static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
- struct bio *bio)
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
- struct nvme_ns_head *head = q->queuedata;
+ struct nvme_ns_head *head = bio->bi_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
/*
- * The namespace might be going away and the bio might
- * be moved to a different queue via blk_steal_bios(),
- * so we need to use the bio_split pool from the original
- * queue to allocate the bvecs from.
+ * The namespace might be going away and the bio might be moved to a
+ * different queue via blk_steal_bios(), so we need to use the bio_split
+ * pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -330,7 +314,7 @@
trace_block_bio_remap(bio->bi_disk->queue, bio,
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- ret = generic_make_request(bio);
+ ret = submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@@ -367,7 +351,7 @@
* path.
*/
bio->bi_disk = head->disk;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -386,14 +370,12 @@
* We also do this for private namespaces as the namespace sharing data could
* change after a rescan.
*/
- if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
return 0;
- q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
+ q = blk_alloc_queue(ctrl->numa_node);
if (!q)
goto out;
- q->queuedata = head;
- blk_queue_make_request(q, nvme_ns_head_make_request);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
@@ -459,8 +441,14 @@
for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
struct nvme_ana_group_desc *desc = base + offset;
- u32 nr_nsids = le32_to_cpu(desc->nnsids);
- size_t nsid_buf_size = nr_nsids * sizeof(__le32);
+ u32 nr_nsids;
+ size_t nsid_buf_size;
+
+ if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
+ return -EINVAL;
+
+ nr_nsids = le32_to_cpu(desc->nnsids);
+ nsid_buf_size = nr_nsids * sizeof(__le32);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
@@ -480,8 +468,6 @@
return error;
offset += nsid_buf_size;
- if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
- return -EINVAL;
}
return 0;
@@ -522,14 +508,17 @@
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
if (ns->head->ns_id < nsid)
continue;
if (ns->head->ns_id == nsid)
nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
+ if (ns->head->ns_id > nsid)
+ goto again;
}
up_read(&ctrl->namespaces_rwsem);
return 0;
@@ -541,7 +530,7 @@
int error;
mutex_lock(&ctrl->ana_lock);
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -577,6 +566,9 @@
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
nvme_read_ana_log(ctrl);
}
@@ -687,13 +679,13 @@
nvme_mpath_set_live(ns);
}
- if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct gendisk *disk = ns->head->disk;
-
- if (disk)
- disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
- }
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+ ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+#endif
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -732,7 +724,8 @@
int error = 0;
/* check if multipath is enabled and we have the capability */
- if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
+ if (!multipath || !ctrl->subsys ||
+ !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2df90d4..5dd1dd8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,7 @@
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
+#include <linux/t10-pi.h>
#include <trace/events/block.h>
@@ -28,6 +29,22 @@
#define NVME_DEFAULT_KATO 5
#define NVME_KATO_GRACE 10
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT 0
+#define NVME_INLINE_METADATA_SG_CNT 0
+#else
+#define NVME_INLINE_SG_CNT 2
+#define NVME_INLINE_METADATA_SG_CNT 1
+#endif
+
+/*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+#define NVME_CTRL_PAGE_SHIFT 12
+#define NVME_CTRL_PAGE_SIZE (1 << NVME_CTRL_PAGE_SHIFT)
+
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
@@ -117,11 +134,22 @@
NVME_QUIRK_SHARED_TAGS = (1 << 13),
/*
+ * Don't change the value of the temperature threshold feature
+ */
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
+
+ /*
* The controller doesn't handle the Identify Namespace
* Identification Descriptor list subcommand despite claiming
* NVMe 1.3 compliance.
*/
NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+
+ /*
+ * The controller requires the command_id value be limited, so skip
+ * encoding the generation sequence number.
+ */
+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
};
/*
@@ -131,6 +159,7 @@
struct nvme_request {
struct nvme_command *cmd;
union nvme_result result;
+ u8 genctr;
u8 retries;
u8 flags;
u16 status;
@@ -154,7 +183,7 @@
static inline u16 nvme_req_qid(struct request *req)
{
- if (!req->rq_disk)
+ if (!req->q->queuedata)
return 0;
return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
@@ -166,12 +195,32 @@
*/
#define NVME_QUIRK_DELAY_AMOUNT 2300
+/*
+ * enum nvme_ctrl_state: Controller state
+ *
+ * @NVME_CTRL_NEW: New controller just allocated, initial state
+ * @NVME_CTRL_LIVE: Controller is connected and I/O capable
+ * @NVME_CTRL_RESETTING: Controller is resetting (or scheduled reset)
+ * @NVME_CTRL_CONNECTING: Controller is disconnected, now connecting the
+ * transport
+ * @NVME_CTRL_DELETING: Controller is deleting (or scheduled deletion)
+ * @NVME_CTRL_DELETING_NOIO: Controller is deleting and I/O is not
+ * disabled/failed immediately. This state comes
+ * after all async event processing took place and
+ * before ns removal and the controller deletion
+ * progress
+ * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during
+ * shutdown or removal. In this case we forcibly
+ * kill all inflight I/O as they have no chance to
+ * complete
+ */
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
+ NVME_CTRL_DELETING_NOIO,
NVME_CTRL_DEAD,
};
@@ -221,9 +270,12 @@
u32 queue_count;
u64 cap;
- u32 page_size;
u32 max_hw_sectors;
u32 max_segments;
+ u32 max_integrity_segments;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u32 max_zone_append;
+#endif
u16 crdt[3];
u16 oncs;
u16 oacs;
@@ -238,6 +290,8 @@
u16 kas;
u8 npss;
u8 apsta;
+ u16 wctemp;
+ u16 cctemp;
u32 oaes;
u32 aen_result;
u32 ctratt;
@@ -247,13 +301,13 @@
unsigned long quirks;
struct nvme_id_power_state psd[32];
struct nvme_effects_log *effects;
+ struct xarray cels;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
- bool created;
#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
@@ -329,6 +383,7 @@
u8 eui64[8];
u8 nguid[16];
uuid_t uuid;
+ u8 csi;
};
/*
@@ -346,7 +401,9 @@
struct nvme_ns_ids ids;
struct list_head entry;
struct kref ref;
+ bool shared;
int instance;
+ struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
struct gendisk *disk;
struct bio_list requeue_list;
@@ -359,6 +416,11 @@
#endif
};
+enum nvme_ns_features {
+ NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+};
+
struct nvme_ns {
struct list_head list;
@@ -378,8 +440,11 @@
u16 ms;
u16 sgs;
u32 sws;
- bool ext;
u8 pi_type;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
+ unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
@@ -389,6 +454,12 @@
};
+/* NVMe ns supports metadata actions by the controller (generate/strip) */
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
+}
+
struct nvme_ctrl_ops {
const char *name;
struct module *module;
@@ -405,6 +476,49 @@
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
+/*
+ * nvme command_id is constructed as such:
+ * | xxxx | xxxxxxxxxxxx |
+ * gen request tag
+ */
+#define nvme_genctr_mask(gen) (gen & 0xf)
+#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
+#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
+#define nvme_tag_from_cid(cid) (cid & 0xfff)
+
+static inline u16 nvme_cid(struct request *rq)
+{
+ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
+}
+
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ u8 genctr = nvme_genctr_from_cid(command_id);
+ u16 tag = nvme_tag_from_cid(command_id);
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(tags, tag);
+ if (unlikely(!rq)) {
+ pr_err("could not locate request for tag %#x\n",
+ tag);
+ return NULL;
+ }
+ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
+ dev_err(nvme_req(rq)->ctrl->device,
+ "request %#x genctr mismatch (got %#x expected %#x)\n",
+ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
+ return NULL;
+ }
+ return rq;
+}
+
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
+}
+
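Editor's note, worked example of the encoding above (illustrative, not part of the patch): with genctr 0x5 and blk-mq tag 0x02a, nvme_cid() returns 0x502a; on completion, nvme_genctr_from_cid(0x502a) is 0x5 and nvme_tag_from_cid(0x502a) is 0x02a, so a stale or corrupted CQE carrying an older generation fails the check in nvme_find_rq() and is rejected rather than completing the wrong request.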
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
const char *dev_name);
@@ -444,7 +558,39 @@
return lba << (ns->lba_shift - SECTOR_SHIFT);
}
-static inline void nvme_end_request(struct request *req, __le16 status,
+/*
+ * Convert byte length to nvme's 0-based num dwords
+ */
+static inline u32 nvme_bytes_to_numd(size_t len)
+{
+ return (len >> 2) - 1;
+}
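Editor's note, illustrative only: nvme_bytes_to_numd(4096) evaluates to 1023, the zero-based dword count for a 4 KiB log page transfer.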
+
+static inline bool nvme_is_ana_error(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+ /* check for a status code type of 'path related status' */
+ return (status & 0x700) == 0x300;
+}
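Editor's note, concrete case (not part of the patch): NVME_SC_HOST_PATH_ERROR (0x370) has status code type 0x3, so (0x370 & 0x700) == 0x300 and nvme_is_path_error() returns true.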
+
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so. If not let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
@@ -453,7 +599,9 @@
rq->result = result;
/* inject error when permitted by fault injection framework */
nvme_should_fail(req);
- blk_mq_complete_request(req);
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return true;
+ return blk_mq_complete_request_remote(req);
}
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
@@ -466,6 +614,12 @@
put_device(ctrl->device);
}
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+ return !qid &&
+ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
@@ -481,7 +635,6 @@
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -527,8 +680,11 @@
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
+struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+ struct nvme_ns_head **head, int *srcu_idx);
+void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
@@ -544,7 +700,7 @@
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
-bool nvme_failover_req(struct request *req);
+void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -556,6 +712,7 @@
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
@@ -571,18 +728,7 @@
struct nvme_ns *ns = req->q->queuedata;
if (req->cmd_flags & REQ_NVME_MPATH)
- trace_block_bio_complete(ns->head->disk->queue,
- req->bio, status);
-}
-
-static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
-{
- struct block_device *bdev = bdget_disk(disk, 0);
-
- if (bdev) {
- bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
- bdput(bdev);
- }
+ trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
extern struct device_attribute dev_attr_ana_grpid;
@@ -604,9 +750,8 @@
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
-static inline bool nvme_failover_req(struct request *req)
+static inline void nvme_failover_req(struct request *req)
{
- return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
@@ -663,11 +808,35 @@
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
-static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
-{
-}
#endif /* CONFIG_NVME_MULTIPATH */
+int nvme_revalidate_zones(struct nvme_ns *ns);
+#ifdef CONFIG_BLK_DEV_ZONED
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
+int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action);
+#else
+#define nvme_report_zones NULL
+
+static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action)
+{
+ return BLK_STS_NOTSUPP;
+}
+
+static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+{
+ dev_warn(ns->ctrl->device,
+ "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
+ return -EPROTONOSUPPORT;
+}
+#endif
+
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
@@ -693,4 +862,20 @@
return dev_to_disk(dev)->private_data;
}
+#ifdef CONFIG_NVME_HWMON
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+#else
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ return 0;
+}
+#endif
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode);
+void nvme_execute_passthru_rq(struct request *rq);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+void nvme_put_ns(struct nvme_ns *ns);
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index af516c3..97afeb8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
@@ -22,6 +23,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -61,23 +63,43 @@
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set,
- .get = param_get_int,
+ .get = param_get_uint,
};
-static int io_queue_depth = 1024;
+static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+static int io_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int n;
+ int ret;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n > num_possible_cpus())
+ return -EINVAL;
+ return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_count_ops = {
+ .set = io_queue_count_set,
+ .get = param_get_uint,
+};
+
static unsigned int write_queues;
-module_param(write_queues, uint, 0644);
+module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
"Number of queues to use for writes. If not set, reads and writes "
"will share a queue set.");
static unsigned int poll_queues;
-module_param(poll_queues, uint, 0644);
+module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
+static bool noacpi;
+module_param(noacpi, bool, 0444);
+MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
+
struct nvme_dev;
struct nvme_queue;
@@ -99,7 +121,7 @@
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
- int q_depth;
+ u32 q_depth;
int io_sqes;
u32 db_stride;
void __iomem *bar;
@@ -135,13 +157,14 @@
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
- int n = 0, ret;
+ int ret;
+ u32 n;
- ret = kstrtoint(val, 10, &n);
+ ret = kstrtou32(val, 10, &n);
if (ret != 0 || n < 2)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_uint(val, kp);
}
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -169,16 +192,15 @@
void *sq_cmds;
/* only used for poll queues: */
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
- volatile struct nvme_completion *cqes;
+ struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
- u16 q_depth;
+ u32 q_depth;
u16 cq_vector;
u16 sq_tail;
u16 last_sq_tail;
u16 cq_head;
- u16 last_cq_head;
u16 qid;
u8 cq_phase;
u8 sqes;
@@ -346,10 +368,10 @@
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_npages(unsigned size, struct nvme_dev *dev)
+static int nvme_pci_npages_prp(void)
{
- unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
- dev->ctrl.page_size);
+ unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
@@ -357,22 +379,18 @@
* Calculates the number of pages needed for the SGL segments. For example a 4k
* page can accommodate 256 SGL descriptors.
*/
-static int nvme_pci_npages_sgl(unsigned int num_seg)
+static int nvme_pci_npages_sgl(void)
{
- return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
+ return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
+ PAGE_SIZE);
}
-static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
- unsigned int size, unsigned int nseg, bool use_sgl)
+static size_t nvme_pci_iod_alloc_size(void)
{
- size_t alloc_size;
+ size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
- if (use_sgl)
- alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
- else
- alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
-
- return alloc_size + sizeof(struct scatterlist) * nseg;
+ return sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
}
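Editor's note, for a sense of scale and assuming a 4 KiB PAGE_SIZE build: nvme_pci_npages_sgl() evaluates to a single page, since NVME_MAX_SEGS (127) SGL descriptors of 16 bytes each occupy only 2032 bytes, and the worst-case PRP and SGL page counts are folded into one fixed per-request allocation here.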
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -514,9 +532,6 @@
int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
- if (nseg == 0)
- return false;
-
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
@@ -530,7 +545,7 @@
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_addr_t dma_addr = iod->first_dma;
int i;
@@ -619,34 +634,33 @@
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
- u32 page_size = dev->ctrl.page_size;
- int offset = dma_addr & (page_size - 1);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
- length -= (page_size - offset);
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
iod->first_dma = 0;
goto done;
}
- dma_len -= (page_size - offset);
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
if (dma_len) {
- dma_addr += (page_size - offset);
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
} else {
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- if (length <= page_size) {
+ if (length <= NVME_CTRL_PAGE_SIZE) {
iod->first_dma = dma_addr;
goto done;
}
- nprps = DIV_ROUND_UP(length, page_size);
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
iod->npages = 0;
@@ -665,7 +679,7 @@
iod->first_dma = prp_dma;
i = 0;
for (;;) {
- if (i == page_size >> 3) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
@@ -676,9 +690,9 @@
i = 1;
}
prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= page_size;
- dma_addr += page_size;
- length -= page_size;
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
if (length <= 0)
break;
if (dma_len > 0)
@@ -791,8 +805,8 @@
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
- unsigned int first_prp_len = dev->ctrl.page_size - offset;
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -802,7 +816,7 @@
cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
if (bv->bv_len > first_prp_len)
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
@@ -820,7 +834,7 @@
cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -834,7 +848,7 @@
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
@@ -889,7 +903,7 @@
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
- return 0;
+ return BLK_STS_OK;
}
/*
@@ -948,7 +962,6 @@
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_cleanup_cmd(req);
if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
@@ -960,8 +973,9 @@
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
- return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
- nvmeq->cq_phase;
+ struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
@@ -982,7 +996,8 @@
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
- volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -991,58 +1006,53 @@
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- nvme_end_request(req, cqe->status, cqe->result);
-}
-
-static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
-{
- while (start != end) {
- nvme_handle_cqe(nvmeq, start);
- if (++start == nvmeq->q_depth)
- start = 0;
- }
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result))
+ nvme_pci_complete_rq(req);
}
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (nvmeq->cq_head == nvmeq->q_depth - 1) {
+ u32 tmp = nvmeq->cq_head + 1;
+
+ if (tmp == nvmeq->q_depth) {
nvmeq->cq_head = 0;
- nvmeq->cq_phase = !nvmeq->cq_phase;
+ nvmeq->cq_phase ^= 1;
} else {
- nvmeq->cq_head++;
+ nvmeq->cq_head = tmp;
}
}
-static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
- u16 *end, unsigned int tag)
+static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
int found = 0;
- *start = nvmeq->cq_head;
while (nvme_cqe_pending(nvmeq)) {
- if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
- found++;
+ found++;
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ nvme_handle_cqe(nvmeq, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
- *end = nvmeq->cq_head;
- if (*start != *end)
+ if (found)
nvme_ring_cq_doorbell(nvmeq);
return found;
}
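Editor's note, worked example of the phase handling above (illustrative): with a q_depth of 4, cq_head walks 0, 1, 2, 3 and the wrap back to 0 flips cq_phase from 1 to 0; only entries the controller has rewritten on the next lap carry the new phase bit, so nvme_cqe_pending() keeps ignoring stale entries from the previous pass.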
@@ -1051,76 +1061,53 @@
{
struct nvme_queue *nvmeq = data;
irqreturn_t ret = IRQ_NONE;
- u16 start, end;
/*
* The rmb/wmb pair ensures we see all updates from a previous run of
* the irq handler, even if that was on another CPU.
*/
rmb();
- if (nvmeq->cq_head != nvmeq->last_cq_head)
+ if (nvme_process_cq(nvmeq))
ret = IRQ_HANDLED;
- nvme_process_cq(nvmeq, &start, &end, -1);
- nvmeq->last_cq_head = nvmeq->cq_head;
wmb();
- if (start != end) {
- nvme_complete_cqes(nvmeq, start, end);
- return IRQ_HANDLED;
- }
-
return ret;
}
static irqreturn_t nvme_irq_check(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
+
if (nvme_cqe_pending(nvmeq))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
/*
- * Poll for completions any queue, including those not dedicated to polling.
+ * Poll for completions for any interrupt driven queue
* Can be called from any context.
*/
-static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
+static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
- u16 start, end;
- int found;
- /*
- * For a poll queue we need to protect against the polling thread
- * using the CQ lock. For normal interrupt driven threads we have
- * to disable the interrupt to avoid racing with it.
- */
- if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
- spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- spin_unlock(&nvmeq->cq_poll_lock);
- } else {
- disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- }
+ WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
- nvme_complete_cqes(nvmeq, start, end);
- return found;
+ disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ nvme_process_cq(nvmeq);
+ enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
struct nvme_queue *nvmeq = hctx->driver_data;
- u16 start, end;
bool found;
if (!nvme_cqe_pending(nvmeq))
return 0;
spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, -1);
- nvme_complete_cqes(nvmeq, start, end);
+ found = nvme_process_cq(nvmeq);
spin_unlock(&nvmeq->cq_poll_lock);
return found;
@@ -1226,7 +1213,6 @@
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
-
/* If true, indicates loss of adapter communication, possibly by a
* NVMe Subsystem reset.
*/
@@ -1297,7 +1283,12 @@
/*
* Did we miss an interrupt?
*/
- if (nvme_poll_irqdisable(nvmeq, req->tag)) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
+ nvme_poll(req->mq_hctx);
+ else
+ nvme_poll_irqdisable(nvmeq);
+
+ if (blk_mq_request_completed(req)) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
@@ -1313,7 +1304,7 @@
switch (dev->ctrl.state) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- /* fall through */
+ fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
@@ -1328,9 +1319,9 @@
}
/*
- * Shutdown the controller immediately and schedule a reset if the
- * command was already aborted once before and still hasn't been
- * returned to the driver, or if this is the admin queue.
+ * Shutdown the controller immediately and schedule a reset if the
+ * command was already aborted once before and still hasn't been
+ * returned to the driver, or if this is the admin queue.
*/
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
@@ -1351,7 +1342,7 @@
memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
- cmd.abort.cid = req->tag;
+ cmd.abort.cid = nvme_cid(req);
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
@@ -1440,23 +1431,23 @@
else
nvme_disable_ctrl(&dev->ctrl);
- nvme_poll_irqdisable(nvmeq, -1);
+ nvme_poll_irqdisable(nvmeq);
}
/*
* Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
- u16 start, end;
int i;
for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
- nvme_process_cq(&dev->queues[i], &start, &end, -1);
- nvme_complete_cqes(&dev->queues[i], start, end);
+ spin_lock(&dev->queues[i].cq_poll_lock);
+ nvme_process_cq(&dev->queues[i]);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
}
}
@@ -1465,11 +1456,12 @@
{
int q_depth = dev->q_depth;
unsigned q_size_aligned = roundup(q_depth * entry_size,
- dev->ctrl.page_size);
+ NVME_CTRL_PAGE_SIZE);
if (q_size_aligned * nr_io_queues > dev->cmb_size) {
u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
- mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
+
+ mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
q_depth = div_u64(mem_per_q, entry_size);
/*
@@ -1598,7 +1590,7 @@
result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)
return result;
- else if (result)
+ if (result)
goto release_cq;
nvmeq->cq_vector = vector;
@@ -1662,7 +1654,7 @@
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev_to_node(dev->dev);
+ dev->admin_tagset.numa_node = dev->ctrl.numa_node;
dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1738,6 +1730,8 @@
if (result)
return result;
+ dev->ctrl.numa_node = dev_to_node(dev->dev);
+
nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1832,6 +1826,9 @@
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1846,6 +1843,16 @@
return;
/*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
@@ -1883,6 +1890,7 @@
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
+ u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
u64 dma_addr = dev->host_mem_descs_dma;
struct nvme_command c;
int ret;
@@ -1891,8 +1899,7 @@
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
- c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
- ilog2(dev->ctrl.page_size));
+ c.features.dword12 = cpu_to_le32(host_mem_size);
c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
@@ -1912,7 +1919,7 @@
for (i = 0; i < dev->nr_host_mem_descs; i++) {
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
- size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
le64_to_cpu(desc->addr),
@@ -1964,7 +1971,7 @@
break;
descs[i].addr = cpu_to_le64(dma_addr);
- descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+ descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
i++;
}
@@ -1980,7 +1987,7 @@
out_free_bufs:
while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, bufs[i],
le64_to_cpu(descs[i].addr),
@@ -1998,12 +2005,12 @@
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
- u32 chunk_size;
+ u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+ u64 chunk_size;
/* start big and work our way down */
- for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
- chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
- chunk_size /= 2) {
+ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
if (!min || dev->host_mem_size >= min)
return 0;
@@ -2069,7 +2076,7 @@
unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
/*
- * If there is no interupt available for queues, ensure that
+ * If there is no interrupt available for queues, ensure that
* the default queue is set to 1. The affinity set size is
* also set to one, but the irq core ignores it for this case.
*
@@ -2105,32 +2112,30 @@
.calc_sets = nvme_calc_irq_sets,
.priv = dev,
};
- unsigned int irq_queues, this_p_queues;
+ unsigned int irq_queues, poll_queues;
/*
- * Poll queues don't need interrupts, but we need at least one IO
- * queue left over for non-polled IO.
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
*/
- this_p_queues = dev->nr_poll_queues;
- if (this_p_queues >= nr_io_queues) {
- this_p_queues = nr_io_queues - 1;
- irq_queues = 1;
- } else {
- irq_queues = nr_io_queues - this_p_queues + 1;
- }
- dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
+ poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
+ dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
- /* Initialize for the single interrupt case */
+ /*
+ * Initialize for the single interrupt case, will be updated in
+ * nvme_calc_irq_sets().
+ */
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
/*
- * Some Apple controllers require all queues to use the
- * first vector.
+ * We need interrupts for the admin queue and each non-polled I/O queue,
+ * but some Apple controllers require all queues to use the first
+ * vector.
*/
- if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
- irq_queues = 1;
-
+ irq_queues = 1;
+ if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
+ irq_queues += (nr_io_queues - poll_queues);
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
@@ -2326,9 +2331,9 @@
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev_to_node(dev->dev);
- dev->tagset.queue_depth =
- min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ dev->tagset.numa_node = dev->ctrl.numa_node;
+ dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
+ BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = sizeof(struct nvme_iod);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2387,7 +2392,7 @@
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
- dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
@@ -2527,7 +2532,8 @@
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- PAGE_SIZE, PAGE_SIZE, 0);
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
@@ -2559,13 +2565,13 @@
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
- put_device(dev->dev);
nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
- kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
mempool_destroy(dev->iod_mempool);
+ put_device(dev->dev);
+ kfree(dev->queues);
kfree(dev);
}
@@ -2630,6 +2636,7 @@
* Don't limit the IOMMU merged segment size.
*/
dma_set_max_seg_size(dev->dev, 0xffffffff);
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
mutex_unlock(&dev->shutdown_lock);
@@ -2644,6 +2651,12 @@
goto out;
}
+ /*
+ * We do not support an SGL for metadata (yet), so we are limited to a
+ * single integrity segment for the separate metadata pointer.
+ */
+ dev->ctrl.max_integrity_segments = 1;
+
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2747,7 +2760,7 @@
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
- return snprintf(buf, size, "%s", dev_name(&pdev->dev));
+ return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -2822,6 +2835,32 @@
return 0;
}
+#ifdef CONFIG_ACPI
+static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ u8 val;
+
+ /*
+ * Look for _DSD property specifying that the storage device on the port
+ * must use D3 to support deep platform power savings during
+ * suspend-to-idle.
+ */
+
+ if (!adev)
+ return false;
+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+ &val))
+ return false;
+ return val == 1;
+}
+#else
+static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
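Editor's note, not part of the patch: in practice this keys off a firmware hint. Platforms that want the drive left in D3 over suspend-to-idle publish a _DSD property named "StorageD3Enable" with value 1 on the NVMe device's ACPI companion, and the new noacpi module parameter above acts as an escape hatch when that hint misbehaves.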
+
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2871,12 +2910,21 @@
quirks |= check_vendor_combination_bug(pdev);
+ if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+ /*
+ * Some systems use a BIOS workaround to ask for D3 on
+ * platforms that support kernel managed suspend.
+ */
+ dev_info(&pdev->dev,
+ "platform quirk: setting simple suspend\n");
+ quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
+ }
+
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
*/
- alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
- NVME_MAX_SEGS, true);
+ alloc_size = nvme_pci_iod_alloc_size();
WARN_ON_ONCE(alloc_size > PAGE_SIZE);
dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
@@ -2938,6 +2986,7 @@
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
nvme_disable_prepare_reset(dev, true);
}
@@ -2966,10 +3015,9 @@
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_uninit_ctrl(&dev->ctrl);
nvme_release_prp_pools(dev);
nvme_dev_unmap(dev);
- nvme_put_ctrl(&dev->ctrl);
+ nvme_uninit_ctrl(&dev->ctrl);
}
#ifdef CONFIG_PM_SLEEP
@@ -3055,7 +3103,7 @@
/*
* Clearing npss forces a controller reset on resume. The
- * correct value will be resdicovered then.
+ * correct value will be rediscovered then.
*/
ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
@@ -3068,6 +3116,7 @@
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+
return nvme_disable_prepare_reset(ndev, true);
}
@@ -3141,21 +3190,23 @@
};
static const struct pci_device_id nvme_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x0953),
+ { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a53),
+ { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a54),
+ { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
- NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a55),
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_MEDIUM_PRIO_SQ |
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3197,7 +3248,10 @@
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
- { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
@@ -3206,7 +3260,10 @@
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
- NVME_QUIRK_SHARED_TAGS },
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
+
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
@@ -3232,6 +3289,7 @@
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
return pci_register_driver(&nvme_driver);
}
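
Before moving on to the rdma.c changes, a short standalone sketch of the queue-depth arithmetic touched by the pci.c hunks above may help: CAP.MQES is a 0's-based field, the io_queue_depth module parameter caps it, and the blk-mq tag set gets one entry less. This is only an illustrative model; the CAP.MQES mask and the BLK_MQ_MAX_DEPTH value of 10240 are assumptions mirroring this kernel series, not something the patch itself defines.

#include <stdio.h>
#include <stdint.h>

/* Assumed constants mirroring this kernel series; not defined by the patch. */
#define BLK_MQ_MAX_DEPTH   10240u
#define NVME_CAP_MQES(cap) ((uint32_t)((cap) & 0xffff))

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	uint64_t cap = 0x3ff;            /* example CAP register with MQES = 1023 */
	uint32_t io_queue_depth = 1024;  /* module parameter default */

	/* CAP.MQES is 0's based, so the usable queue depth is MQES + 1. */
	uint32_t q_depth = min_u32(NVME_CAP_MQES(cap) + 1, io_queue_depth);
	uint32_t sqsize = q_depth - 1;   /* 0's based value programmed into the ctrl */

	/* The blk-mq tag set is clamped again and gives up one entry. */
	uint32_t tagset_depth = min_u32(q_depth, BLK_MQ_MAX_DEPTH) - 1;

	printf("q_depth=%u sqsize=%u tagset_depth=%u\n",
	       q_depth, sqsize, tagset_depth);
	return 0;
}
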
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dcc3d23..8eacc9b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -34,6 +34,11 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
+#define NVME_RDMA_DATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
+#define NVME_RDMA_METADATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
+
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
@@ -48,6 +53,11 @@
u64 dma;
};
+struct nvme_rdma_sgl {
+ int nents;
+ struct sg_table sg_table;
+};
+
struct nvme_rdma_queue;
struct nvme_rdma_request {
struct nvme_request req;
@@ -58,12 +68,12 @@
refcount_t ref;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
- int nents;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
- struct sg_table sg_table;
- struct scatterlist first_sgl[];
+ struct nvme_rdma_sgl data_sgl;
+ struct nvme_rdma_sgl *metadata_sgl;
+ bool use_sig_mr;
};
enum nvme_rdma_queue_flags {
@@ -85,6 +95,9 @@
struct rdma_cm_id *cm_id;
int cm_error;
struct completion cm_done;
+ bool pi_support;
+ int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -138,18 +151,11 @@
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
-/* XXX: really should move to a generic header sooner or later.. */
-static inline void put_unaligned_le24(u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
return queue - queue->ctrl->queues;
@@ -269,6 +275,9 @@
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = queue->ib_cq;
init_attr.recv_cq = queue->ib_cq;
+ if (queue->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+ init_attr.qp_context = queue;
ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
@@ -298,6 +307,12 @@
if (!req->sqe.data)
return -ENOMEM;
+ /* metadata nvme_rdma_sgl struct is located after command's data SGL */
+ if (queue->pi_support)
+ req->metadata_sgl = (void *)nvme_req(rq) +
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+
req->queue = queue;
return 0;
@@ -397,6 +412,14 @@
return NULL;
}
+static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
+{
+ if (nvme_rdma_poll_queue(queue))
+ ib_free_cq(queue->ib_cq);
+ else
+ ib_cq_pool_put(queue->ib_cq, queue->cq_size);
+}
+
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_device *dev;
@@ -408,6 +431,8 @@
dev = queue->device;
ibdev = dev->dev;
+ if (queue->pi_support)
+ ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
/*
@@ -416,7 +441,7 @@
* the destruction of the QP shouldn't use rdma_cm API.
*/
ib_destroy_qp(queue->qp);
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
@@ -424,10 +449,47 @@
nvme_rdma_dev_put(dev);
}
-static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
- return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ibdev->attrs.max_fast_reg_page_list_len - 1);
+ u32 max_page_list_len;
+
+ if (pi_support)
+ max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ else
+ max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
+}
+
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+ struct nvme_rdma_queue *queue)
+{
+ int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+ enum ib_poll_context poll_ctx;
+
+ /*
+ * Spread I/O queue completion vectors according to their queue index.
+ * Admin queues can always go on completion vector 0.
+ */
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+ /* Polling queues need direct cq polling context */
+ if (nvme_rdma_poll_queue(queue)) {
+ poll_ctx = IB_POLL_DIRECT;
+ queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+ comp_vector, poll_ctx);
+ } else {
+ poll_ctx = IB_POLL_SOFTIRQ;
+ queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+ comp_vector, poll_ctx);
+ }
+
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ return ret;
+ }
+
+ return 0;
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -435,8 +497,6 @@
struct ib_device *ibdev;
const int send_wr_factor = 3; /* MR, SEND, INV */
const int cq_factor = send_wr_factor + 1; /* + RECV */
- int comp_vector, idx = nvme_rdma_queue_idx(queue);
- enum ib_poll_context poll_ctx;
int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -447,26 +507,12 @@
}
ibdev = queue->device->dev;
- /*
- * Spread I/O queues completion vectors according their queue index.
- * Admin queues can always go on completion vector 0.
- */
- comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
-
- /* Polling queues need direct cq polling context */
- if (nvme_rdma_poll_queue(queue))
- poll_ctx = IB_POLL_DIRECT;
- else
- poll_ctx = IB_POLL_SOFTIRQ;
-
/* +1 for ib_stop_cq */
- queue->ib_cq = ib_alloc_cq(ibdev, queue,
- cq_factor * queue->queue_size + 1,
- comp_vector, poll_ctx);
- if (IS_ERR(queue->ib_cq)) {
- ret = PTR_ERR(queue->ib_cq);
+ queue->cq_size = cq_factor * queue->queue_size + 1;
+
+ ret = nvme_rdma_create_cq(ibdev, queue);
+ if (ret)
goto out_put_dev;
- }
ret = nvme_rdma_create_qp(queue, send_wr_factor);
if (ret)
@@ -484,7 +530,7 @@
* misaligned we'll end up using two entries for a single data page,
* so one additional entry is required.
*/
- pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
@@ -492,21 +538,35 @@
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
- queue->queue_size, idx);
+ queue->queue_size, nvme_rdma_queue_idx(queue));
goto out_destroy_ring;
}
+ if (queue->pi_support) {
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
+ queue->queue_size, IB_MR_TYPE_INTEGRITY,
+ pages_per_mr, pages_per_mr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize PI MR pool sized %d for QID %d\n",
+ queue->queue_size, nvme_rdma_queue_idx(queue));
+ goto out_destroy_mr_pool;
+ }
+ }
+
set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
return 0;
+out_destroy_mr_pool:
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
out_destroy_ring:
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_qp:
rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
out_put_dev:
nvme_rdma_dev_put(queue->device);
return ret;
@@ -520,7 +580,12 @@
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
+ if (idx && ctrl->ctrl.max_integrity_segments)
+ queue->pi_support = true;
+ else
+ queue->pi_support = false;
init_completion(&queue->cm_done);
if (idx > 0)
@@ -535,7 +600,8 @@
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -565,6 +631,8 @@
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -576,9 +644,10 @@
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -586,8 +655,9 @@
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
@@ -734,7 +804,7 @@
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = nctrl->numa_node;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_RDMA_DATA_SGL_SIZE;
set->driver_data = ctrl;
set->nr_hw_queues = 1;
set->timeout = ADMIN_TIMEOUT;
@@ -748,7 +818,10 @@
set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
@@ -782,6 +855,7 @@
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool new)
{
+ bool pi_capable = false;
int error;
error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
@@ -789,9 +863,15 @@
return error;
ctrl->device = ctrl->queues[0].device;
- ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
+ ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
- ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ /* T10-PI support */
+ if (ctrl->device->dev->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)
+ pi_capable = true;
+
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
+ pi_capable);
/*
* Bind the async event SQE DMA mapping to the admin queue lifetime.
@@ -833,6 +913,10 @@
ctrl->ctrl.max_segments = ctrl->max_fr_pages;
ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+ if (pi_capable)
+ ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+ else
+ ctrl->ctrl.max_integrity_segments = 0;
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1019,11 +1103,13 @@
return ret;
if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1053,8 +1139,14 @@
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
if (!changed) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ /*
+ * state change failure is ok if we started ctrl delete,
+ * unless we are in the middle of creating a new controller,
+ * to avoid races with the teardown flow.
+ */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -1108,14 +1200,16 @@
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1131,10 +1225,20 @@
queue_work(nvme_reset_wq, &ctrl->err_work);
}
+static void nvme_rdma_end_request(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (!nvme_try_complete_req(rq, req->status, req->result))
+ nvme_rdma_complete_rq(rq);
+}
+
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
const char *op)
{
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -1155,16 +1259,11 @@
{
struct nvme_rdma_request *req =
container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
-
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1190,19 +1289,29 @@
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ struct list_head *pool = &queue->qp->rdma_mrs;
if (!blk_rq_nr_phys_segments(rq))
return;
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ if (req->use_sig_mr)
+ pool = &queue->qp->sig_mrs;
+
if (req->mr) {
- ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ ib_mr_pool_put(queue->qp, pool, req->mr);
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
- nvme_cleanup_cmd(rq);
- sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1221,16 +1330,17 @@
int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
- struct scatterlist *sgl = req->sg_table.sgl;
struct ib_sge *sge = &req->sge[1];
+ struct scatterlist *sgl;
u32 len = 0;
int i;
- for (i = 0; i < count; i++, sgl++, sge++) {
+ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
sge->addr = sg_dma_address(sgl);
sge->length = sg_dma_len(sgl);
sge->lkey = queue->device->pd->local_dma_lkey;
len += sge->length;
+ sge++;
}
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
@@ -1246,8 +1356,8 @@
{
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
- sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
- put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
return 0;
@@ -1268,7 +1378,8 @@
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
+ nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
+ SZ_4K);
if (unlikely(nr < count)) {
ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req->mr = NULL;
@@ -1299,12 +1410,125 @@
return 0;
}
+static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
+ u8 pi_type)
+{
+ u16 control = le16_to_cpu(cmd->rw.control);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no memory domain */
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+}
+
+static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
+{
+ *mask = 0;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
+ *mask |= IB_SIG_CHECK_REFTAG;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
+ *mask |= IB_SIG_CHECK_GUARD;
+}
+
+static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SIG");
+}
+
+static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count, int pi_count)
+{
+ struct nvme_rdma_sgl *sgl = &req->data_sgl;
+ struct ib_reg_wr *wr = &req->reg_wr;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct bio *bio = rq->bio;
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
+ nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
+ req->metadata_sgl->sg_table.sgl, pi_count, NULL,
+ SZ_4K);
+ if (unlikely(nr))
+ goto mr_put;
+
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
+ req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_sig_done;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = &req->reg_cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = req->mr;
+ wr->key = req->mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+
+ return 0;
+
+mr_put:
+ ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
+ req->mr = NULL;
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+}
+
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
int count, ret;
req->num_sge = 1;
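
To make the nvme_rdma_set_sig_attrs() logic added above easier to follow outside the kernel tree, here is a minimal userspace model of the wire/memory domain decision: with PRACT set the HCA inserts or strips the protection information, so only the wire domain carries T10-DIF; otherwise PI passes through both domains, and the protection interval is 1 << interval_exp bytes. The PRACT bit position and the plain structs here are illustrative assumptions, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

/* Assumed PRACT bit position, for illustration only. */
#define PRINFO_PRACT	(1u << 13)

struct sig_domain {
	int has_dif;		/* does this domain carry T10-DIF? */
	uint32_t pi_interval;	/* protection interval in bytes */
};

/* Userspace model of the wire/memory decision in nvme_rdma_set_sig_attrs(). */
static void set_sig_attrs(uint16_t control, uint8_t interval_exp,
			  struct sig_domain *wire, struct sig_domain *mem)
{
	wire->has_dif = 1;
	wire->pi_interval = 1u << interval_exp;

	if (control & PRINFO_PRACT) {
		/*
		 * WRITE_INSERT/READ_STRIP: the HCA generates or strips the PI,
		 * so no memory domain is set up (the kernel also clears PRACT
		 * in the command before sending it to the controller).
		 */
		mem->has_dif = 0;
		mem->pi_interval = 0;
	} else {
		/* WRITE_PASS/READ_PASS: PI travels through both domains. */
		*mem = *wire;
	}
}

int main(void)
{
	struct sig_domain wire, mem;

	set_sig_attrs(PRINFO_PRACT, 9, &wire, &mem);	/* 512-byte intervals */
	printf("PRACT: wire dif=%d (%u bytes), mem dif=%d\n",
	       wire.has_dif, wire.pi_interval, mem.has_dif);

	set_sig_attrs(0, 12, &wire, &mem);		/* 4096-byte intervals */
	printf("PASS:  wire dif=%d (%u bytes), mem dif=%d (%u bytes)\n",
	       wire.has_dif, wire.pi_interval, mem.has_dif, mem.pi_interval);
	return 0;
}
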
@@ -1315,22 +1539,52 @@
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
- req->sg_table.sgl = req->first_sgl;
- ret = sg_alloc_table_chained(&req->sg_table,
- blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
- SG_CHUNK_SIZE);
+ req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
+ ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
+ blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
+ NVME_INLINE_SG_CNT);
if (ret)
return -ENOMEM;
- req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
+ req->data_sgl.sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
- rq_dma_dir(rq));
+ count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
if (unlikely(count <= 0)) {
ret = -EIO;
goto out_free_table;
}
+ if (blk_integrity_rq(rq)) {
+ req->metadata_sgl->sg_table.sgl =
+ (struct scatterlist *)(req->metadata_sgl + 1);
+ ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
+ blk_rq_count_integrity_sg(rq->q, rq->bio),
+ req->metadata_sgl->sg_table.sgl,
+ NVME_INLINE_METADATA_SG_CNT);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto out_unmap_sg;
+ }
+
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
+ rq->bio, req->metadata_sgl->sg_table.sgl);
+ pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(pi_count <= 0)) {
+ ret = -EIO;
+ goto out_free_pi_table;
+ }
+ }
+
+ if (req->use_sig_mr) {
+ ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
+ goto out;
+ }
+
if (count <= dev->num_inline_segments) {
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
queue->ctrl->use_inline_data &&
@@ -1349,14 +1603,23 @@
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_sg;
+ goto out_unmap_pi_sg;
return 0;
+out_unmap_pi_sg:
+ if (blk_integrity_rq(rq))
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+out_free_pi_table:
+ if (blk_integrity_rq(rq))
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
out_free_table:
- sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
return ret;
}
@@ -1366,15 +1629,11 @@
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
struct nvme_rdma_request *req =
container_of(qe, struct nvme_rdma_request, sqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "SEND");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
@@ -1385,7 +1644,7 @@
int ret;
sge->addr = qe->dma;
- sge->length = sizeof(struct nvme_command),
+ sge->length = sizeof(struct nvme_command);
sge->lkey = queue->device->pd->local_dma_lkey;
wr.next = NULL;
@@ -1482,10 +1741,10 @@
struct request *rq;
struct nvme_rdma_request *req;
- rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on QP %#x not found\n",
+ "got bad command_id %#x on QP %#x\n",
cqe->command_id, queue->qp->qp_num);
nvme_rdma_error_recovery(queue->ctrl);
return;
@@ -1496,10 +1755,11 @@
req->result = cqe->result;
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
- if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+ if (unlikely(!req->mr ||
+ wc->ex.invalidate_rkey != req->mr->rkey)) {
dev_err(queue->ctrl->ctrl.device,
"Bogus remote invalidation for rkey %#x\n",
- req->mr->rkey);
+ req->mr ? req->mr->rkey : 0);
nvme_rdma_error_recovery(queue->ctrl);
}
} else if (req->mr) {
@@ -1516,15 +1776,14 @@
return;
}
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ nvme_rdma_end_request(req);
}
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_qe *qe =
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct ib_device *ibdev = queue->device->dev;
struct nvme_completion *cqe = qe->data;
const size_t len = sizeof(struct nvme_completion);
@@ -1549,8 +1808,8 @@
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1567,14 +1826,10 @@
for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret)
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1664,18 +1919,14 @@
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
}
- ret = rdma_connect(queue->cm_id, &param);
+ ret = rdma_connect_locked(queue->cm_id, &param);
if (ret) {
dev_err(ctrl->ctrl.device,
- "rdma_connect failed (%d).\n", ret);
- goto out_destroy_queue_ib;
+ "rdma_connect_locked failed (%d).\n", ret);
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1706,8 +1957,6 @@
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- nvme_rdma_destroy_queue_ib(queue);
- /* fall through */
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
@@ -1823,11 +2072,19 @@
blk_mq_start_request(rq);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ queue->pi_support &&
+ (c->common.opcode == nvme_cmd_write ||
+ c->common.opcode == nvme_cmd_read) &&
+ nvme_ns_has_pi(ns))
+ req->use_sig_mr = true;
+ else
+ req->use_sig_mr = false;
+
err = nvme_rdma_map_data(queue, rq, c);
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", err);
- nvme_cleanup_cmd(rq);
goto err;
}
@@ -1838,18 +2095,19 @@
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr ? &req->reg_wr.wr : NULL);
- if (unlikely(err)) {
- nvme_rdma_unmap_data(queue, rq);
- goto err;
- }
+ if (unlikely(err))
+ goto err_unmap;
return BLK_STS_OK;
+err_unmap:
+ nvme_rdma_unmap_data(queue, rq);
err:
if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
+ nvme_cleanup_cmd(rq);
unmap_qe:
ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
@@ -1863,12 +2121,46 @@
return ib_process_cq_direct(queue->ib_cq, -1);
}
+static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ nvme_req(rq)->status = NVME_SC_INVALID_PI;
+ return;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type, mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+}
+
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct ib_device *ibdev = queue->device->dev;
+ if (req->use_sig_mr)
+ nvme_rdma_check_pi_status(req);
+
nvme_rdma_unmap_data(queue, rq);
ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
@@ -1988,7 +2280,7 @@
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.name = "rdma",
.module = THIS_MODULE,
- .flags = NVME_F_FABRICS,
+ .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
@@ -2113,7 +2405,6 @@
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
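
One detail worth calling out from the rdma.c changes above: the metadata SGL bookkeeping is carved out of the same per-request allocation as the data SGL, which is why cmd_size grows by sizeof(struct nvme_rdma_sgl) plus the inline metadata scatterlist area, and why req->metadata_sgl is derived by pointer arithmetic after the request and its inline data SGL. The sketch below only mirrors that offset arithmetic; the structure layouts and the inline entry counts are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real layouts live in the kernel headers. */
struct scatterlist { unsigned long page_link, offset, length, dma; };
struct sg_table { struct scatterlist *sgl; unsigned int nents, orig_nents; };
struct nvme_rdma_sgl { int nents; struct sg_table sg_table; };
struct nvme_rdma_request { char opaque[192]; };	/* size is a placeholder */

/* Assumed inline entry counts (the kernel derives these via NVME_INLINE_*_SG_CNT). */
#define DATA_SGL_ENTRIES	2
#define METADATA_SGL_ENTRIES	1

int main(void)
{
	size_t data_sgl_size = sizeof(struct scatterlist) * DATA_SGL_ENTRIES;
	size_t meta_sgl_size = sizeof(struct scatterlist) * METADATA_SGL_ENTRIES;

	/* cmd_size for a PI-capable I/O queue, as in the tag set hunk above. */
	size_t cmd_size = sizeof(struct nvme_rdma_request) + data_sgl_size +
			  sizeof(struct nvme_rdma_sgl) + meta_sgl_size;

	/* metadata_sgl sits right behind the request and its inline data SGL. */
	size_t metadata_sgl_offset = sizeof(struct nvme_rdma_request) + data_sgl_size;

	printf("cmd_size=%zu bytes, metadata_sgl at offset %zu\n",
	       cmd_size, metadata_sgl_offset);
	return 0;
}
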
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 38bbbbb..6105894 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -20,6 +20,16 @@
struct nvme_tcp_queue;
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique for some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -36,6 +46,7 @@
u32 pdu_sent;
u16 ttag;
struct list_head entry;
+ struct llist_node lentry;
__le32 ddgst;
struct bio *curr_bio;
@@ -50,6 +61,7 @@
enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
+ NVME_TCP_Q_POLLING = 2,
};
enum nvme_tcp_recv_state {
@@ -64,8 +76,11 @@
struct work_struct io_work;
int io_cpu;
- spinlock_t lock;
+ struct mutex queue_lock;
+ struct mutex send_mutex;
+ struct llist_head req_list;
struct list_head send_list;
+ bool more_requests;
/* recv state */
void *pdu;
@@ -119,8 +134,9 @@
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
-static struct blk_mq_ops nvme_tcp_mq_ops;
-static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
@@ -247,15 +263,57 @@
}
}
-static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
+static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ bool sync, bool last)
{
struct nvme_tcp_queue *queue = req->queue;
+ bool empty;
- spin_lock(&queue->lock);
- list_add_tail(&req->entry, &queue->send_list);
- spin_unlock(&queue->lock);
+ empty = llist_add(&req->lentry, &queue->req_list) &&
+ list_empty(&queue->send_list) && !queue->request;
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ /*
+ * If we're first on the send_list, try to send directly;
+ * otherwise queue io_work. Also, only do that if we are on
+ * the same CPU, so we don't introduce contention.
+ */
+ if (queue->io_cpu == raw_smp_processor_id() &&
+ sync && empty && mutex_trylock(&queue->send_mutex)) {
+ queue->more_requests = !last;
+ nvme_tcp_send_all(queue);
+ queue->more_requests = false;
+ mutex_unlock(&queue->send_mutex);
+ }
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ struct llist_node *node;
+
+ for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+ req = llist_entry(node, struct nvme_tcp_request, lentry);
+ list_add(&req->entry, &queue->send_list);
+ }
}
static inline struct nvme_tcp_request *
@@ -263,13 +321,17 @@
{
struct nvme_tcp_request *req;
- spin_lock(&queue->lock);
req = list_first_entry_or_null(&queue->send_list,
struct nvme_tcp_request, entry);
- if (req)
- list_del(&req->entry);
- spin_unlock(&queue->lock);
+ if (!req) {
+ nvme_tcp_process_req_list(queue);
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (unlikely(!req))
+ return NULL;
+ }
+ list_del(&req->entry);
return req;
}
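
The lockless submission path above relies on an ordering detail that is easy to miss: llist_del_all() hands back entries newest-first, and list_add() prepends, so walking the popped chain and prepending each entry onto send_list restores submission (FIFO) order. Below is a small plain-C model of just that ordering argument; ordinary pointers stand in for llist_node/list_head, and none of the kernel list APIs are used.

#include <stdio.h>

/* Minimal stand-in for the lockless push list: push() prepends, like llist_add(). */
struct node { int id; struct node *next; };

static struct node *push(struct node *head, struct node *n)
{
	n->next = head;			/* newest entry ends up first, as with llist */
	return n;
}

int main(void)
{
	struct node reqs[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
	struct node *req_list = NULL, *send_list = NULL;

	/* Producers queue requests 1, 2, 3 in that order. */
	for (int i = 0; i < 3; i++)
		req_list = push(req_list, &reqs[i]);	/* req_list: 3 -> 2 -> 1 */

	/*
	 * Consumer: take the whole chain (llist_del_all) and prepend each entry
	 * onto send_list (list_add), which reverses it back into FIFO order.
	 */
	while (req_list) {
		struct node *n = req_list;
		req_list = n->next;
		send_list = push(send_list, n);		/* ends up 1 -> 2 -> 3 */
	}

	for (struct node *n = send_list; n; n = n->next)
		printf("send request %d\n", n->id);	/* prints 1, 2, 3 */
	return 0;
}
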
@@ -429,16 +491,17 @@
{
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag 0x%x not found\n",
- nvme_tcp_queue_id(queue), cqe->command_id);
+ "got bad cqe.command_id %#x on queue %d\n",
+ cqe->command_id, nvme_tcp_queue_id(queue));
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
return -EINVAL;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ nvme_complete_rq(rq);
queue->nr_cqe++;
return 0;
@@ -449,11 +512,11 @@
{
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
+ "got bad c2hdata.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT;
}
@@ -490,8 +553,8 @@
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+ cqe->command_id)))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -547,8 +610,8 @@
data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
data->ttag = pdu->ttag;
- data->command_id = rq->tag;
- data->data_offset = cpu_to_le32(req->data_sent);
+ data->command_id = nvme_cid(rq);
+ data->data_offset = pdu->r2t_offset;
data->data_length = cpu_to_le32(req->pdu_len);
return 0;
}
@@ -560,11 +623,11 @@
struct request *rq;
int ret;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
+ "got bad r2t.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT;
}
req = blk_mq_rq_to_pdu(rq);
@@ -576,7 +639,7 @@
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- nvme_tcp_queue_request(req);
+ nvme_tcp_queue_request(req, false, true);
return 0;
}
@@ -635,7 +698,8 @@
{
union nvme_result res = {};
- nvme_end_request(rq, cpu_to_le16(status << 1), res);
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
+ nvme_complete_rq(rq);
}
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -643,7 +707,7 @@
{
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
struct request *rq =
- blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
while (true) {
@@ -736,8 +800,8 @@
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
queue->nr_cqe++;
@@ -786,7 +850,8 @@
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
- if (likely(queue && queue->rd_enabled))
+ if (likely(queue && queue->rd_enabled) &&
+ !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -819,7 +884,6 @@
case TCP_LAST_ACK:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
- /* fallthrough */
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
break;
default:
@@ -840,24 +904,34 @@
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ int req_data_len = req->data_len;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
size_t offset = nvme_tcp_req_cur_offset(req);
size_t len = nvme_tcp_req_cur_length(req);
bool last = nvme_tcp_pdu_last_send(req, len);
+ int req_data_sent = req->data_sent;
int ret, flags = MSG_DONTWAIT;
- if (last && !queue->data_digest)
+ if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
flags |= MSG_EOR;
else
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
if (sendpage_ok(page)) {
ret = kernel_sendpage(queue->sock, page, offset, len,
@@ -869,12 +943,19 @@
if (ret <= 0)
return ret;
- nvme_tcp_advance_req(req, ret);
if (queue->data_digest)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
- /* fully successful last write*/
+ /*
+ * update the request iterator except for the last payload send
+ * in the request where we don't want to modify it as we may
+ * compete with the RX path completing the request.
+ */
+ if (req_data_sent + ret < req_data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in current PDU */
if (last && ret == len) {
if (queue->data_digest) {
nvme_tcp_ddgst_final(queue->snd_hash,
@@ -895,11 +976,16 @@
struct nvme_tcp_queue *queue = req->queue;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
bool inline_data = nvme_tcp_has_inline_data(req);
- int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
+ int flags = MSG_DONTWAIT;
int ret;
+ if (inline_data || nvme_tcp_queue_more(queue))
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+ else
+ flags |= MSG_EOR;
+
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
@@ -938,7 +1024,7 @@
ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE);
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
if (unlikely(ret <= 0))
return ret;
@@ -959,18 +1045,24 @@
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ size_t offset = req->offset;
int ret;
- struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &req->ddgst + req->offset,
+ .iov_base = (u8 *)&req->ddgst + req->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
};
+ if (nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1014,8 +1106,15 @@
if (req->state == NVME_TCP_SEND_DDGST)
ret = nvme_tcp_try_send_ddgst(req);
done:
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
ret = 0;
+ } else if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", ret);
+ if (ret != -EPIPE && ret != -ECONNRESET)
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ }
return ret;
}
@@ -1045,26 +1144,20 @@
bool pending = false;
int result;
- result = nvme_tcp_try_send(queue);
- if (result > 0) {
- pending = true;
- } else if (unlikely(result < 0)) {
- dev_err(queue->ctrl->ctrl.device,
- "failed to send request %d\n", result);
-
- /*
- * Fail the request unless peer closed the connection,
- * in which case error recovery flow will complete all.
- */
- if ((result != -EPIPE) && (result != -ECONNRESET))
- nvme_tcp_fail_request(queue->request);
- nvme_tcp_done_send_req(queue);
- return;
+ if (mutex_trylock(&queue->send_mutex)) {
+ result = nvme_tcp_try_send(queue);
+ mutex_unlock(&queue->send_mutex);
+ if (result > 0)
+ pending = true;
+ else if (unlikely(result < 0))
+ break;
}
result = nvme_tcp_try_recv(queue);
if (result > 0)
pending = true;
+ else if (unlikely(result < 0))
+ return;
if (!pending)
return;
@@ -1145,6 +1238,7 @@
sock_release(queue->sock);
kfree(queue->pdu);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1245,17 +1339,72 @@
return ret;
}
+static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
+{
+ return nvme_tcp_queue_id(queue) == 0;
+}
+
+static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
+}
+
+static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ !nvme_tcp_read_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ] +
+ ctrl->io_queues[HCTX_TYPE_POLL];
+}
+
+static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+ int n = 0;
+
+ if (nvme_tcp_default_queue(queue))
+ n = qid - 1;
+ else if (nvme_tcp_read_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ else if (nvme_tcp_poll_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+ ctrl->io_queues[HCTX_TYPE_READ] - 1;
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
int qid, size_t queue_size)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
- int ret, opt, rcv_pdu_size, n;
+ int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
+ init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
- spin_lock_init(&queue->lock);
+ mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
queue->queue_size = queue_size;
@@ -1270,63 +1419,34 @@
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
/* Single syn retry */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_SYNCNT sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_syncnt(queue->sock->sk, 1);
/* Set TCP no delay */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_nodelay(queue->sock->sk);
/*
* Cleanup whatever is sitting in the TCP transmit queue on socket
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set SO_LINGER sock opt %d\n", ret);
- goto err_sock;
- }
+ sock_no_linger(queue->sock->sk);
+
+ if (so_priority > 0)
+ sock_set_priority(queue->sock->sk, so_priority);
/* Set socket type of service */
- if (nctrl->opts->tos >= 0) {
- opt = nctrl->opts->tos;
- ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set IP_TOS sock opt %d\n", ret);
- goto err_sock;
- }
- }
+ if (nctrl->opts->tos >= 0)
+ ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
/* Set 10 seconds timeout for icresp recvmsg */
queue->sock->sk->sk_rcvtimeo = 10 * HZ;
queue->sock->sk->sk_allocation = GFP_ATOMIC;
- if (!qid)
- n = 0;
- else
- n = (qid - 1) % num_online_cpus();
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1408,6 +1528,8 @@
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1435,9 +1557,10 @@
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
@@ -1474,7 +1597,8 @@
set->ops = &nvme_tcp_admin_mq_ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->nr_hw_queues = 1;
@@ -1485,8 +1609,8 @@
set->ops = &nvme_tcp_mq_ops;
set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
- set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
@@ -1890,8 +2014,14 @@
}
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /*
+ * state change failure is ok if we started ctrl delete,
+ * unless we are in the middle of creating a new controller,
+ * to avoid races with the teardown flow.
+ */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -1947,6 +2077,7 @@
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
@@ -1954,8 +2085,9 @@
blk_mq_unquiesce_queue(ctrl->admin_q);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1990,8 +2122,9 @@
nvme_tcp_teardown_ctrl(ctrl, false);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -2078,7 +2211,7 @@
ctrl->async_req.curr_bio = NULL;
ctrl->async_req.data_len = 0;
- nvme_tcp_queue_request(&ctrl->async_req);
+ nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
static void nvme_tcp_complete_timed_out(struct request *rq)
@@ -2202,6 +2335,14 @@
return 0;
}
+static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+
+ if (!llist_empty(&queue->req_list))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2221,7 +2362,7 @@
blk_mq_start_request(rq);
- nvme_tcp_queue_request(req);
+ nvme_tcp_queue_request(req, true, bd->last);
return BLK_STS_OK;
}
@@ -2276,14 +2417,20 @@
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return 0;
+
+ set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
+ clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
return queue->nr_cqe;
}
-static struct blk_mq_ops nvme_tcp_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
+ .commit_rqs = nvme_tcp_commit_rqs,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
.exit_request = nvme_tcp_exit_request,
@@ -2293,7 +2440,7 @@
.poll = nvme_tcp_poll,
};
-static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
@@ -2420,7 +2567,6 @@
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
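
Before the new zns.c file, one more sketch for the tcp.c changes: nvme_tcp_set_queue_io_cpu() above computes a per-class relative index so that default, read, and poll queues each start counting from CPU 0 instead of sharing one flat qid-based mapping. The standalone model below reproduces that index arithmetic; mapping the index to a CPU with a simple modulo is an approximation of cpumask_next_wrap() over a contiguous range of online CPUs, which is an assumption made only for illustration.

#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

/* Relative index within a queue class, mirroring nvme_tcp_set_queue_io_cpu(). */
static int queue_relative_index(int qid, const int io_queues[HCTX_MAX_TYPES])
{
	int nr_default = io_queues[HCTX_TYPE_DEFAULT];
	int nr_read = io_queues[HCTX_TYPE_READ];

	if (qid == 0)
		return 0;				/* admin queue */
	if (qid <= nr_default)
		return qid - 1;				/* default queues */
	if (qid <= nr_default + nr_read)
		return qid - nr_default - 1;		/* read queues */
	return qid - nr_default - nr_read - 1;		/* poll queues */
}

int main(void)
{
	const int io_queues[HCTX_MAX_TYPES] = { 4, 2, 2 };	/* example split */
	const int online_cpus = 4;				/* assumed contiguous */

	for (int qid = 0; qid <= 8; qid++) {
		int n = queue_relative_index(qid, io_queues);
		printf("qid %d -> io_cpu %d\n", qid, n % online_cpus);
	}
	return 0;
}
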
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
new file mode 100644
index 0000000..67e87e9
--- /dev/null
+++ b/drivers/nvme/host/zns.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include "nvme.h"
+
+int nvme_revalidate_zones(struct nvme_ns *ns)
+{
+ struct request_queue *q = ns->queue;
+ int ret;
+
+ ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ if (!ret)
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+ return ret;
+}
+
+static int nvme_set_max_append(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_zns *id;
+ int status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (status) {
+ kfree(id);
+ return status;
+ }
+
+ if (id->zasl)
+ ctrl->max_zone_append = 1 << (id->zasl + 3);
+ else
+ ctrl->max_zone_append = ctrl->max_hw_sectors;
+ kfree(id);
+ return 0;
+}
+
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+{
+ struct nvme_effects_log *log = ns->head->effects;
+ struct request_queue *q = ns->queue;
+ struct nvme_command c = { };
+ struct nvme_id_ns_zns *id;
+ int status;
+
+ /* Driver requires zone append support */
+ if (!(le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
+ NVME_CMD_EFFECTS_CSUPP)) {
+ dev_warn(ns->ctrl->device,
+ "append not supported for zoned namespace:%d\n",
+ ns->head->ns_id);
+ return -EINVAL;
+ }
+
+ /* Lazily query controller append limit for the first zoned namespace */
+ if (!ns->ctrl->max_zone_append) {
+ status = nvme_set_max_append(ns->ctrl);
+ if (status)
+ return status;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
+ if (status)
+ goto free_data;
+
+ /*
+ * We currently do not handle devices requiring any of the zoned
+ * operation characteristics.
+ */
+ if (id->zoc) {
+ dev_warn(ns->ctrl->device,
+ "zone operations:%x not supported for namespace:%u\n",
+ le16_to_cpu(id->zoc), ns->head->ns_id);
+ status = -EINVAL;
+ goto free_data;
+ }
+
+ ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+ if (!is_power_of_2(ns->zsze)) {
+ dev_warn(ns->ctrl->device,
+ "invalid zone size:%llu for namespace:%u\n",
+ ns->zsze, ns->head->ns_id);
+ status = -EINVAL;
+ goto free_data;
+ }
+
+ q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
+ blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+free_data:
+ kfree(id);
+ return status;
+}
+
+static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ unsigned int nr_zones, size_t *buflen)
+{
+ struct request_queue *q = ns->disk->queue;
+ size_t bufsize;
+ void *buf;
+
+ const size_t min_bufsize = sizeof(struct nvme_zone_report) +
+ sizeof(struct nvme_zone_descriptor);
+
+ nr_zones = min_t(unsigned int, nr_zones,
+ get_capacity(ns->disk) >> ilog2(ns->zsze));
+
+ bufsize = sizeof(struct nvme_zone_report) +
+ nr_zones * sizeof(struct nvme_zone_descriptor);
+ bufsize = min_t(size_t, bufsize,
+ queue_max_hw_sectors(q) << SECTOR_SHIFT);
+ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+
+ while (bufsize >= min_bufsize) {
+ buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
+ return NULL;
+}
+
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ struct nvme_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb,
+ void *data)
+{
+ struct blk_zone zone = { };
+
+ if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ entry->zt);
+ return -EINVAL;
+ }
+
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone.cond = entry->zs >> 4;
+ zone.len = ns->zsze;
+ zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+
+ return cb(&zone, idx, data);
+}
+
+static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_zone_report *report;
+ struct nvme_command c = { };
+ int ret, zone_idx = 0;
+ unsigned int nz, i;
+ size_t buflen;
+
+ report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
+ if (!report)
+ return -ENOMEM;
+
+ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+
+ sector &= ~(ns->zsze - 1);
+ while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ memset(report, 0, buflen);
+
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out_free;
+ }
+
+ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (!nz)
+ break;
+
+ for (i = 0; i < nz && zone_idx < nr_zones; i++) {
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ zone_idx, cb, data);
+ if (ret)
+ goto out_free;
+ zone_idx++;
+ }
+
+ sector += ns->zsze * nz;
+ }
+
+ if (zone_idx > 0)
+ ret = zone_idx;
+ else
+ ret = -EINVAL;
+out_free:
+ kvfree(report);
+ return ret;
+}
+
+int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
+ int srcu_idx, ret;
+
+ ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ return -EWOULDBLOCK;
+
+ if (ns->head->ids.csi == NVME_CSI_ZNS)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ else
+ ret = -EINVAL;
+ nvme_put_ns_from_disk(head, srcu_idx);
+
+ return ret;
+}
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *c, enum nvme_zone_mgmt_action action)
+{
+ c->zms.opcode = nvme_cmd_zone_mgmt_send;
+ c->zms.nsid = cpu_to_le32(ns->head->ns_id);
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.zsa = action;
+
+ if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
+ c->zms.select_all = 1;
+
+ return BLK_STS_OK;
+}
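A hedged sketch of how the command-setup path is expected to use this helper, mapping block-layer zone operations onto NVMe zone send actions; the switch below is illustrative glue, with the nvme_zone_mgmt_action values taken from the ZNS additions in this series:

	switch (req_op(req)) {
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	}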
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d7f48c0..8056955 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,6 +4,7 @@
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is
@@ -15,6 +16,18 @@
To configure the NVMe target you probably want to use the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git.
+config NVME_TARGET_PASSTHRU
+ bool "NVMe Target Passthrough support"
+ depends on NVME_TARGET
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables target side NVMe passthru controller support for the
+ NVMe Over Fabrics protocol. It allows for hosts to manage and
+ directly access an actual NVMe controller residing on the target
+ side, including executing Vendor Unique Commands.
+
+ If unsure, say N.
+
config NVME_TARGET_LOOP
tristate "NVMe loopback device support"
depends on NVME_TARGET
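To illustrate what this option wires up (the actual hooks appear in the admin-cmd.c and core.c hunks later in this patch), command parsing is diverted to the passthru handlers whenever a subsystem is bound to a passthru controller; roughly:

	/* admin queue parsing (nvmet_parse_admin_cmd) */
	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	/* I/O queue parsing (nvmet_parse_io_cmd) */
	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_io_cmd(req);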
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 2b33836..ebf91fc 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,7 @@
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062..6a8274c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
+#include <linux/part_stat.h>
#include <generated/utsrelease.h>
#include <asm/unaligned.h>
@@ -24,6 +25,16 @@
return len;
}
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_HOST_ID:
+ return sizeof(req->sq->ctrl->hostid);
+ default:
+ return 0;
+ }
+}
+
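This helper feeds the transfer-length check in nvmet_execute_get_features() below: only the Host Identifier feature carries data (the 16-byte hostid), everything else transfers nothing. A minimal sketch of the resulting check:

	/* NVME_FEAT_HOST_ID -> expect sizeof(ctrl->hostid), i.e. 16 bytes */
	/* any other feature -> expect a zero-length transfer              */
	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;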
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
return le64_to_cpu(cmd->get_log_page.lpo);
@@ -31,7 +42,7 @@
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
- nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+ nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -102,11 +113,10 @@
u64 data_units_read = 0, data_units_written = 0;
struct nvmet_ns *ns;
struct nvmet_ctrl *ctrl;
+ unsigned long idx;
ctrl = req->sq->ctrl;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@@ -116,9 +126,7 @@
host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
data_units_written += DIV_ROUND_UP(
part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
}
- rcu_read_unlock();
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -134,7 +142,7 @@
u16 status = NVME_SC_INTERNAL;
unsigned long flags;
- if (req->data_len != sizeof(*log))
+ if (req->transfer_len != sizeof(*log))
goto out;
log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +204,7 @@
u16 status = NVME_SC_INTERNAL;
size_t len;
- if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+ if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
goto out;
mutex_lock(&ctrl->lock);
@@ -206,7 +214,7 @@
len = ctrl->nr_changed_ns * sizeof(__le32);
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
if (!status)
- status = nvmet_zero_sgl(req, len, req->data_len - len);
+ status = nvmet_zero_sgl(req, len, req->transfer_len - len);
ctrl->nr_changed_ns = 0;
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
@@ -219,14 +227,13 @@
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
- rcu_read_unlock();
}
desc->grpid = cpu_to_le32(grpid);
@@ -282,12 +289,56 @@
nvmet_req_complete(req, status);
}
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
+ return;
+
+ switch (req->cmd->get_log_page.lid) {
+ case NVME_LOG_ERROR:
+ return nvmet_execute_get_log_page_error(req);
+ case NVME_LOG_SMART:
+ return nvmet_execute_get_log_page_smart(req);
+ case NVME_LOG_FW_SLOT:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ return nvmet_execute_get_log_page_noop(req);
+ case NVME_LOG_CHANGED_NS:
+ return nvmet_execute_get_log_changed_ns(req);
+ case NVME_LOG_CMD_EFFECTS:
+ return nvmet_execute_get_log_cmd_effects_ns(req);
+ case NVME_LOG_ANA:
+ return nvmet_execute_get_log_page_ana(req);
+ }
+ pr_debug("unhandled lid %d on qid %d\n",
+ req->cmd->get_log_page.lid, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
+ struct nvmet_subsys *subsys)
+{
+ const char *model = NVMET_DEFAULT_CTRL_MODEL;
+ struct nvmet_subsys_model *subsys_model;
+
+ rcu_read_lock();
+ subsys_model = rcu_dereference(subsys->model);
+ if (subsys_model)
+ model = subsys_model->number;
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
+ rcu_read_unlock();
+}
+
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ u32 cmd_capsule_size;
u16 status = 0;
- const char model[] = "Linux";
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -302,7 +353,7 @@
memset(id->sn, ' ', sizeof(id->sn));
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ nvmet_id_set_model_number(id, ctrl->subsys);
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
@@ -316,8 +367,12 @@
/* we support multiple ports, multiple hosts and ANA: */
id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
- /* no limit on data transfer sizes for now */
- id->mdts = 0;
+ /* Limit MDTS according to transport capability */
+ if (ctrl->ops->get_mdts)
+ id->mdts = ctrl->ops->get_mdts(ctrl);
+ else
+ id->mdts = 0;
+
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
@@ -368,16 +423,22 @@
id->awupf = 0;
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
- /* Max command capsule size is sqe + single page of in-capsule data */
- id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
- req->port->inline_data_size) / 16);
+ /*
+ * Max command capsule size is sqe + in-capsule data size.
+ * Disable in-capsule data for Metadata capable controllers.
+ */
+ cmd_capsule_size = sizeof(struct nvme_command);
+ if (!ctrl->pi_support)
+ cmd_capsule_size += req->port->inline_data_size;
+ id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
+
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
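Worked numbers for the capsule sizing above (illustrative, using a 4096-byte inline data size; sizeof(struct nvme_command) is 64 and sizeof(struct nvme_completion) is 16):

	/* no PI:   ioccsz = (64 + 4096) / 16 = 260  (16-byte units)        */
	/* PI on:   ioccsz =  64 / 16         =   4  (in-capsule data off)  */
	/* either:  iorcsz =  16 / 16         =   1                         */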
@@ -407,7 +468,7 @@
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ns *id;
u16 status = 0;
@@ -424,16 +485,21 @@
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns)
+ req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
+ if (!req->ns) {
+ status = 0;
goto done;
+ }
+
+ nvmet_ns_revalidate(req->ns);
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
- switch (req->port->ana_state[ns->anagrpid]) {
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
case NVME_ANA_INACCESSIBLE:
case NVME_ANA_PERSISTENT_LOSS:
break;
@@ -442,8 +508,8 @@
break;
}
- if (ns->bdev)
- nvmet_bdev_set_limits(ns->bdev, id);
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
@@ -457,17 +523,28 @@
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
- id->anagrpid = cpu_to_le32(ns->anagrpid);
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
- id->lbaf[0].ds = ns->blksize_shift;
+ id->lbaf[0].ds = req->ns->blksize_shift;
- if (ns->readonly)
+ if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+ id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ NVME_NS_DPC_PI_TYPE3;
+ id->mc = NVME_MC_EXTENDED_LBA;
+ id->dps = req->ns->pi_type;
+ id->flbas = NVME_NS_FLBAS_META_EXT;
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+ }
+
+ if (req->ns->readonly)
id->nsattr |= (1 << 0);
- nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
@@ -478,6 +555,7 @@
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
__le32 *list;
u16 status = 0;
@@ -489,15 +567,13 @@
goto out;
}
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
list[i++] = cpu_to_le32(ns->nsid);
if (i == buf_size / sizeof(__le32))
break;
}
- rcu_read_unlock();
status = nvmet_copy_to_sgl(req, 0, list, buf_size);
@@ -565,6 +641,28 @@
nvmet_req_complete(req, status);
}
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_NS:
+ return nvmet_execute_identify_ns(req);
+ case NVME_ID_CNS_CTRL:
+ return nvmet_execute_identify_ctrl(req);
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
+ return nvmet_execute_identify_nslist(req);
+ case NVME_ID_CNS_NS_DESC_LIST:
+ return nvmet_execute_identify_desclist(req);
+ }
+
+ pr_debug("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
/*
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
@@ -574,6 +672,8 @@
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
}
@@ -630,7 +730,9 @@
{
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+ nvmet_stop_keep_alive_timer(req->sq->ctrl);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_start_keep_alive_timer(req->sq->ctrl);
nvmet_set_result(req, req->sq->ctrl->kato);
@@ -652,14 +754,26 @@
return 0;
}
-static void nvmet_execute_set_features(struct nvmet_req *req)
+void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
+ u16 nsqr;
+ u16 ncqr;
+
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
+ ncqr = (cdw11 >> 16) & 0xffff;
+ nsqr = cdw11 & 0xffff;
+ if (ncqr == 0xffff || nsqr == 0xffff) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
@@ -715,12 +829,15 @@
nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
-static void nvmet_execute_get_features(struct nvmet_req *req)
+void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
+ if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
+ return;
+
switch (cdw10 & 0xff) {
/*
* These features are mandatory in the spec, but we don't
@@ -785,6 +902,9 @@
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
mutex_lock(&ctrl->lock);
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
mutex_unlock(&ctrl->lock);
@@ -801,6 +921,9 @@
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
@@ -813,81 +936,43 @@
struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_cmd(req);
+ if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ return nvmet_parse_discovery_cmd(req);
+
ret = nvmet_check_ctrl_status(req, cmd);
if (unlikely(ret))
return ret;
+ if (nvmet_req_passthru_ctrl(req))
+ return nvmet_parse_passthru_admin_cmd(req);
+
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_ERROR:
- req->execute = nvmet_execute_get_log_page_error;
- return 0;
- case NVME_LOG_SMART:
- req->execute = nvmet_execute_get_log_page_smart;
- return 0;
- case NVME_LOG_FW_SLOT:
- /*
- * We only support a single firmware slot which always
- * is active, so we can zero out the whole firmware slot
- * log and still claim to fully implement this mandatory
- * log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
- return 0;
- case NVME_LOG_CHANGED_NS:
- req->execute = nvmet_execute_get_log_changed_ns;
- return 0;
- case NVME_LOG_CMD_EFFECTS:
- req->execute = nvmet_execute_get_log_cmd_effects_ns;
- return 0;
- case NVME_LOG_ANA:
- req->execute = nvmet_execute_get_log_page_ana;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_NS:
- req->execute = nvmet_execute_identify_ns;
- return 0;
- case NVME_ID_CNS_CTRL:
- req->execute = nvmet_execute_identify_ctrl;
- return 0;
- case NVME_ID_CNS_NS_ACTIVE_LIST:
- req->execute = nvmet_execute_identify_nslist;
- return 0;
- case NVME_ID_CNS_NS_DESC_LIST:
- req->execute = nvmet_execute_identify_desclist;
- return 0;
- }
- break;
+ req->execute = nvmet_execute_identify;
+ return 0;
case nvme_admin_abort_cmd:
req->execute = nvmet_execute_abort;
- req->data_len = 0;
return 0;
case nvme_admin_set_features:
req->execute = nvmet_execute_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
}
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+ pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
req->sq->qid);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a4..9aed5cc 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -20,61 +20,71 @@
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
-static const struct nvmet_transport_name {
+struct nvmet_type_name_map {
u8 type;
const char *name;
-} nvmet_transport_names[] = {
+};
+
+static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
+static const struct nvmet_type_name_map nvmet_addr_family[] = {
+ { NVMF_ADDR_FAMILY_PCI, "pcie" },
+ { NVMF_ADDR_FAMILY_IP4, "ipv4" },
+ { NVMF_ADDR_FAMILY_IP6, "ipv6" },
+ { NVMF_ADDR_FAMILY_IB, "ib" },
+ { NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_LOOP, "loop" },
+};
+
+static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
+{
+ if (p->enabled)
+ pr_err("Disable port '%u' before changing attribute in %s\n",
+ le16_to_cpu(p->disc_addr.portid), caller);
+ return p->enabled;
+}
+
/*
* nvmet_port Generic ConfigFS definitions.
* Used in any place in the ConfigFS tree that refers to an address.
*/
-static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
- char *page)
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- return sprintf(page, "ipv4\n");
- case NVMF_ADDR_FAMILY_IP6:
- return sprintf(page, "ipv6\n");
- case NVMF_ADDR_FAMILY_IB:
- return sprintf(page, "ib\n");
- case NVMF_ADDR_FAMILY_FC:
- return sprintf(page, "fc\n");
- default:
- return sprintf(page, "\n");
+ u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (nvmet_addr_family[i].type == adrfam)
+ return sprintf(page, "%s\n", nvmet_addr_family[i].name);
}
+
+ return sprintf(page, "\n");
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (sysfs_streq(page, nvmet_addr_family[i].name))
+ goto found;
}
- if (sysfs_streq(page, "ipv4")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
- } else if (sysfs_streq(page, "ipv6")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
- } else if (sysfs_streq(page, "ib")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
- } else if (sysfs_streq(page, "fc")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
- } else {
- pr_err("Invalid value '%s' for adrfam\n", page);
- return -EINVAL;
- }
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+found:
+ port->disc_addr.adrfam = nvmet_addr_family[i].type;
return count;
}
@@ -100,11 +110,9 @@
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
+
port->disc_addr.portid = cpu_to_le16(portid);
return count;
}
@@ -130,11 +138,8 @@
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
return -EINVAL;
@@ -143,20 +148,24 @@
CONFIGFS_ATTR(nvmet_, addr_traddr);
-static ssize_t nvmet_addr_treq_show(struct config_item *item,
- char *page)
+static const struct nvmet_type_name_map nvmet_addr_treq[] = {
+ { NVMF_TREQ_NOT_SPECIFIED, "not specified" },
+ { NVMF_TREQ_REQUIRED, "required" },
+ { NVMF_TREQ_NOT_REQUIRED, "not required" },
+};
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.treq &
- NVME_TREQ_SECURE_CHANNEL_MASK) {
- case NVMF_TREQ_NOT_SPECIFIED:
- return sprintf(page, "not specified\n");
- case NVMF_TREQ_REQUIRED:
- return sprintf(page, "required\n");
- case NVMF_TREQ_NOT_REQUIRED:
- return sprintf(page, "not required\n");
- default:
- return sprintf(page, "\n");
+ u8 treq = to_nvmet_port(item)->disc_addr.treq &
+ NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (treq == nvmet_addr_treq[i].type)
+ return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
}
+
+ return sprintf(page, "\n");
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -164,25 +173,22 @@
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (sysfs_streq(page, nvmet_addr_treq[i].name))
+ goto found;
}
- if (sysfs_streq(page, "not specified")) {
- treq |= NVMF_TREQ_NOT_SPECIFIED;
- } else if (sysfs_streq(page, "required")) {
- treq |= NVMF_TREQ_REQUIRED;
- } else if (sysfs_streq(page, "not required")) {
- treq |= NVMF_TREQ_NOT_REQUIRED;
- } else {
- pr_err("Invalid value '%s' for treq\n", page);
- return -EINVAL;
- }
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+
+found:
+ treq |= nvmet_addr_treq[i].type;
port->disc_addr.treq = treq;
-
return count;
}
@@ -206,11 +212,8 @@
pr_err("Invalid value '%s' for trsvcid\n", page);
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
return -EINVAL;
@@ -233,11 +236,8 @@
struct nvmet_port *port = to_nvmet_port(item);
int ret;
- if (port->enabled) {
- pr_err("Cannot modify inline_data_size while port enabled\n");
- pr_err("Disable the port before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
ret = kstrtoint(page, 0, &port->inline_data_size);
if (ret) {
pr_err("Invalid value '%s' for inline_data_size\n", page);
@@ -248,16 +248,45 @@
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
+}
+
+static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (port->enabled) {
+ pr_err("Disable port before setting pi_enable value.\n");
+ return -EACCES;
+ }
+
+ port->pi_enable = val;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_pi_enable);
+#endif
+
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
struct nvmet_port *port = to_nvmet_port(item);
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (port->disc_addr.trtype != nvmet_transport_names[i].type)
- continue;
- return sprintf(page, "%s\n", nvmet_transport_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (port->disc_addr.trtype == nvmet_transport[i].type)
+ return sprintf(page, "%s\n", nvmet_transport[i].name);
}
return sprintf(page, "\n");
@@ -276,22 +305,20 @@
struct nvmet_port *port = to_nvmet_port(item);
int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (sysfs_streq(page, nvmet_transport_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (sysfs_streq(page, nvmet_transport[i].name))
goto found;
}
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
+
found:
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
- port->disc_addr.trtype = nvmet_transport_names[i].type;
+ port->disc_addr.trtype = nvmet_transport[i].type;
if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
nvmet_port_init_tsas_rdma(port);
return count;
@@ -327,7 +354,7 @@
kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrndup(page, len, GFP_KERNEL);
+ ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
@@ -395,14 +422,12 @@
struct nvmet_subsys *subsys = ns->subsys;
int ret = 0;
-
mutex_lock(&subsys->lock);
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
-
if (uuid_parse(page, &ns->uuid))
ret = -EINVAL;
@@ -545,6 +570,31 @@
CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (!ns->enabled) {
+ pr_err("enable ns before revalidate.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ nvmet_ns_revalidate(ns);
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
@@ -552,6 +602,7 @@
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+ &nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
&nvmet_ns_attr_p2pmem,
#endif
@@ -615,6 +666,103 @@
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+
+static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
+}
+
+static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+
+ ret = -EBUSY;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+
+ kfree(subsys->passthru_ctrl_path);
+ ret = -ENOMEM;
+ subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+
+ return count;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_passthru_, device_path);
+
+static ssize_t nvmet_passthru_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ bool enable;
+ int ret = 0;
+
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_passthru_ctrl_enable(subsys);
+ else
+ nvmet_passthru_ctrl_disable(subsys);
+
+ return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, enable);
+
+static struct configfs_attribute *nvmet_passthru_attrs[] = {
+ &nvmet_passthru_attr_device_path,
+ &nvmet_passthru_attr_enable,
+ NULL,
+};
+
+static const struct config_item_type nvmet_passthru_type = {
+ .ct_attrs = nvmet_passthru_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+ config_group_init_type_name(&subsys->passthru_group,
+ "passthru", &nvmet_passthru_type);
+ configfs_add_default_group(&subsys->passthru_group,
+ &subsys->group);
+}
+
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+}
+
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
static int nvmet_port_subsys_allow_link(struct config_item *parent,
struct config_item *target)
{
@@ -811,14 +959,14 @@
struct nvmet_subsys *subsys = to_subsys(item);
if (NVME_TERTIARY(subsys->ver))
- return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver),
- (int)NVME_TERTIARY(subsys->ver));
- else
- return snprintf(page, PAGE_SIZE, "%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver));
+ return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+
+ return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
}
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
@@ -828,6 +976,9 @@
int major, minor, tertiary = 0;
int ret;
+ /* passthru subsystems use the underlying controller's version */
+ if (nvmet_passthru_ctrl(subsys))
+ return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
@@ -852,20 +1003,177 @@
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
const char *page, size_t count)
{
- struct nvmet_subsys *subsys = to_subsys(item);
+ u64 serial;
+
+ if (sscanf(page, "%llx\n", &serial) != 1)
+ return -EINVAL;
down_write(&nvmet_config_sem);
- sscanf(page, "%llx\n", &subsys->serial);
+ to_subsys(item)->serial = serial;
up_write(&nvmet_config_sem);
return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
+static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_min;
+
+ if (sscanf(page, "%hu\n", &cntlid_min) != 1)
+ return -EINVAL;
+
+ if (cntlid_min == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_min >= to_subsys(item)->cntlid_max)
+ goto out_unlock;
+ to_subsys(item)->cntlid_min = cntlid_min;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
+
+static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_max;
+
+ if (sscanf(page, "%hu\n", &cntlid_max) != 1)
+ return -EINVAL;
+
+ if (cntlid_max == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_max <= to_subsys(item)->cntlid_min)
+ goto out_unlock;
+ to_subsys(item)->cntlid_max = cntlid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+
+static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ struct nvmet_subsys_model *subsys_model;
+ char *model = NVMET_DEFAULT_CTRL_MODEL;
+ int ret;
+
+ rcu_read_lock();
+ subsys_model = rcu_dereference(subsys->model);
+ if (subsys_model)
+ model = subsys_model->number;
+ ret = snprintf(page, PAGE_SIZE, "%s\n", model);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ struct nvmet_subsys_model *new_model;
+ char *new_model_number;
+ int pos = 0, len;
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos]))
+ return -EINVAL;
+ }
+
+ new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!new_model_number)
+ return -ENOMEM;
+
+ new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
+ if (!new_model) {
+ kfree(new_model_number);
+ return -ENOMEM;
+ }
+ memcpy(new_model->number, new_model_number, len);
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ new_model = rcu_replace_pointer(subsys->model, new_model,
+ mutex_is_locked(&subsys->lock));
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ kfree_rcu(new_model, rcuhead);
+ kfree(new_model_number);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+
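A note on the pointer swap above, sketched with a separate variable for clarity (behaviour is identical to the code as written, which reuses new_model to hold the returned pointer): rcu_replace_pointer() publishes the new model and returns the previous one, and it is that previous model which kfree_rcu() releases after a grace period, so readers under rcu_read_lock() (as in nvmet_id_set_model_number()) never see freed memory.

	old_model = rcu_replace_pointer(subsys->model, new_model,
					mutex_is_locked(&subsys->lock));
	kfree_rcu(old_model, rcuhead);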
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
+}
+
+static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool pi_enable;
+
+ if (strtobool(page, &pi_enable))
+ return -EINVAL;
+
+ subsys->pi_support = pi_enable;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
+#endif
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
&nvmet_subsys_attr_attr_serial,
+ &nvmet_subsys_attr_attr_cntlid_min,
+ &nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_model,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_subsys_attr_attr_pi_enable,
+#endif
NULL,
};
@@ -915,6 +1223,8 @@
configfs_add_default_group(&subsys->allowed_hosts_group,
&subsys->group);
+ nvmet_add_passthru_group(subsys);
+
return &subsys->group;
}
@@ -970,12 +1280,19 @@
NULL,
};
-static void nvmet_referral_release(struct config_item *item)
+static void nvmet_referral_notify(struct config_group *group,
+ struct config_item *item)
{
struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
struct nvmet_port *port = to_nvmet_port(item);
nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
kfree(port);
}
@@ -1006,6 +1323,7 @@
static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
+ .disconnect_notify = nvmet_referral_notify,
};
static const struct config_item_type nvmet_referrals_type = {
@@ -1013,10 +1331,7 @@
.ct_group_ops = &nvmet_referral_group_ops,
};
-static struct {
- enum nvme_ana_state state;
- const char *name;
-} nvmet_ana_state_names[] = {
+static struct nvmet_type_name_map nvmet_ana_state[] = {
{ NVME_ANA_OPTIMIZED, "optimized" },
{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
{ NVME_ANA_INACCESSIBLE, "inaccessible" },
@@ -1031,10 +1346,9 @@
enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (state != nvmet_ana_state_names[i].state)
- continue;
- return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (state == nvmet_ana_state[i].type)
+ return sprintf(page, "%s\n", nvmet_ana_state[i].name);
}
return sprintf(page, "\n");
@@ -1044,10 +1358,11 @@
const char *page, size_t count)
{
struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state *ana_state = grp->port->ana_state;
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (sysfs_streq(page, nvmet_ana_state[i].name))
goto found;
}
@@ -1056,10 +1371,9 @@
found:
down_write(&nvmet_ana_sem);
- grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+ ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
nvmet_ana_chgcnt++;
up_write(&nvmet_ana_sem);
-
nvmet_port_send_ana_event(grp->port);
return count;
}
@@ -1148,6 +1462,8 @@
{
struct nvmet_port *port = to_nvmet_port(item);
+ /* Let inflight controllers teardown complete */
+ flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);
@@ -1161,6 +1477,9 @@
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
&nvmet_attr_param_inline_data_size,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_attr_param_pi_enable,
+#endif
NULL,
};
@@ -1210,6 +1529,7 @@
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
config_group_init_type_name(&port->group, name, &nvmet_port_type);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ee81d94..9a8fa2e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -73,7 +73,7 @@
status = NVME_SC_ACCESS_DENIED;
break;
case -EIO:
- /* FALLTHRU */
+ fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -115,13 +115,14 @@
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- struct nvmet_ns *ns;
+ unsigned long nsid = 0;
+ struct nvmet_ns *cur;
+ unsigned long idx;
- if (list_empty(&subsys->namespaces))
- return 0;
+ xa_for_each(&subsys->namespaces, idx, cur)
+ nsid = cur->nsid;
- ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
- return ns->nsid;
+ return nsid;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
@@ -129,39 +130,30 @@
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- if (!ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
-
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ nvmet_req_complete(req, status);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, async_event_work);
struct nvmet_async_event *aen;
struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
-
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
nvmet_set_result(req, nvmet_async_event_result(aen));
@@ -169,8 +161,31 @@
kfree(aen);
mutex_unlock(&ctrl->lock);
+ trace_nvmet_async_event(ctrl, req->cqe->result.u32);
nvmet_req_complete(req, 0);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen, *tmp;
+
+ mutex_lock(&ctrl->lock);
+ list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+ list_del(&aen->entry);
+ kfree(aen);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+
+ nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -318,12 +333,21 @@
if (!try_module_get(ops->owner))
return -EINVAL;
- ret = ops->add_port(port);
- if (ret) {
- module_put(ops->owner);
- return ret;
+ /*
+ * If the user requested PI support and the transport isn't pi capable,
+ * don't enable the port.
+ */
+ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+ pr_err("T10-PI is not supported by transport type %d\n",
+ port->disc_addr.trtype);
+ ret = -EINVAL;
+ goto out_put;
}
+ ret = ops->add_port(port);
+ if (ret)
+ goto out_put;
+
/* If the transport didn't set inline_data_size, then disable it. */
if (port->inline_data_size < 0)
port->inline_data_size = 0;
@@ -331,6 +355,10 @@
port->enabled = true;
port->tr_ops = ops;
return 0;
+
+out_put:
+ module_put(ops->owner);
+ return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
@@ -351,10 +379,10 @@
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
- bool cmd_seen = ctrl->cmd_seen;
+ bool reset_tbkas = ctrl->reset_tbkas;
- ctrl->cmd_seen = false;
- if (cmd_seen) {
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -367,7 +395,7 @@
nvmet_ctrl_fatal_error(ctrl);
}
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -379,7 +407,7 @@
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -389,28 +417,13 @@
cancel_delayed_work_sync(&ctrl->ka_work);
}
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
- __le32 nsid)
-{
- struct nvmet_ns *ns;
-
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
- if (ns->nsid == le32_to_cpu(nsid))
- return ns;
- }
-
- return NULL;
-}
-
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
struct nvmet_ns *ns;
- rcu_read_lock();
- ns = __nvmet_find_namespace(ctrl, nsid);
+ ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
if (ns)
percpu_ref_get(&ns->ref);
- rcu_read_unlock();
return ns;
}
@@ -446,7 +459,7 @@
return -EINVAL;
}
- if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
ns->device_path);
return -EINVAL;
@@ -516,6 +529,19 @@
ns->nsid);
}
+void nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+ loff_t oldsize = ns->size;
+
+ if (ns->bdev)
+ nvmet_bdev_ns_revalidate(ns);
+ else
+ nvmet_file_ns_revalidate(ns);
+
+ if (oldsize != ns->size)
+ nvmet_ns_changed(ns->subsys, ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
@@ -524,6 +550,12 @@
mutex_lock(&subsys->lock);
ret = 0;
+
+ if (nvmet_passthru_ctrl(subsys)) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
if (ns->enabled)
goto out_unlock;
@@ -552,24 +584,10 @@
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
- /*
- * The namespaces list needs to be sorted to simplify the implementation
- * of the Identify Namepace List subcommand.
- */
- if (list_empty(&subsys->namespaces)) {
- list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
- } else {
- struct nvmet_ns *old;
+ ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+ if (ret)
+ goto out_restore_subsys_maxnsid;
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
- lockdep_is_held(&subsys->lock)) {
- BUG_ON(ns->nsid == old->nsid);
- if (ns->nsid < old->nsid)
- break;
- }
-
- list_add_tail_rcu(&ns->dev_link, &old->dev_link);
- }
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -578,6 +596,10 @@
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+
+out_restore_subsys_maxnsid:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -596,7 +618,7 @@
goto out_unlock;
ns->enabled = false;
- list_del_rcu(&ns->dev_link);
+ xa_erase(&ns->subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
@@ -647,7 +669,6 @@
if (!ns)
return NULL;
- INIT_LIST_HEAD(&ns->dev_link);
init_completion(&ns->disable_done);
ns->nsid = nsid;
@@ -736,8 +757,6 @@
{
cq->qid = qid;
cq->size = size;
-
- ctrl->cqs[qid] = cq;
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
@@ -759,19 +778,28 @@
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
+ struct nvmet_ctrl *ctrl = sq->ctrl;
+
/*
* If this is the admin queue, complete all AERs so that our
* queue doesn't have outstanding requests on it.
*/
- if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
- nvmet_async_events_free(sq->ctrl);
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
- if (sq->ctrl) {
- nvmet_ctrl_put(sq->ctrl);
+ if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us keep-alive during this period, hence reset the
+ * traffic based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
+ nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
}
@@ -838,6 +866,9 @@
if (unlikely(ret))
return ret;
+ if (nvmet_req_passthru_ctrl(req))
+ return nvmet_parse_passthru_io_cmd(req);
+
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -870,8 +901,11 @@
req->sq = sq;
req->ops = ops;
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
req->transfer_len = 0;
+ req->metadata_len = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
@@ -897,14 +931,10 @@
}
if (unlikely(!req->sq->ctrl))
- /* will return an error for any Non-connect command: */
+ /* will return an error for any non-connect command: */
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (nvme_is_fabrics(req->cmd))
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
else
status = nvmet_parse_admin_cmd(req);
@@ -919,7 +949,7 @@
}
if (sq->ctrl)
- sq->ctrl->cmd_seen = true;
+ sq->ctrl->reset_tbkas = true;
return true;
@@ -937,60 +967,112 @@
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
- if (unlikely(req->data_len != req->transfer_len)) {
+ if (unlikely(len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- } else
- req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
-int nvmet_req_alloc_sgl(struct nvmet_req *req)
-{
- struct pci_dev *p2p_dev = NULL;
-
- if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
- if (req->sq->ctrl && req->ns)
- p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
- req->ns->nsid);
-
- req->p2p_dev = NULL;
- if (req->sq->qid && p2p_dev) {
- req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
- req->transfer_len);
- if (req->sg) {
- req->p2p_dev = p2p_dev;
- return 0;
- }
- }
-
- /*
- * If no P2P memory was available we fallback to using
- * regular memory
- */
+ return false;
}
- req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
+
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+ if (unlikely(data_len > req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ return false;
+ }
+
+ return true;
+}
+
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+{
+ return req->transfer_len - req->metadata_len;
+}
+
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
+{
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
if (!req->sg)
- return -ENOMEM;
+ goto out_err;
+
+ if (req->metadata_len) {
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
+ }
+
+ req->p2p_dev = p2p_dev;
return 0;
+out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+ return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
-void nvmet_req_free_sgl(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
- if (req->p2p_dev)
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+ &req->sg_cnt);
+ if (unlikely(!req->sg))
+ goto out;
+
+ if (req->metadata_len) {
+ req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+ &req->metadata_sg_cnt);
+ if (unlikely(!req->metadata_sg))
+ goto out_free;
+ }
+
+ return 0;
+out_free:
+ sgl_free(req->sg);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
+
+void nvmet_req_free_sgls(struct nvmet_req *req)
+{
+ if (req->p2p_dev) {
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
- else
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
+ } else {
sgl_free(req->sg);
+ if (req->metadata_sg)
+ sgl_free(req->metadata_sg);
+ }
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
}
-EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
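A hedged usage sketch from a transport's perspective (the surrounding glue and the data_len/md_len locals are hypothetical; only the two exported helpers come from this hunk). The request's transfer_len includes the metadata length, which is split back out internally via nvmet_data_transfer_len():

	/* hypothetical transport code, for illustration only */
	req->transfer_len = data_len + md_len;
	req->metadata_len = md_len;
	if (nvmet_req_alloc_sgls(req) < 0)
		return NVME_SC_INTERNAL;
	/* map req->sg / req->metadata_sg, run the command, then: */
	nvmet_req_free_sgls(req);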
static inline bool nvmet_cc_en(u32 cc)
{
@@ -1187,14 +1269,14 @@
struct nvmet_req *req)
{
struct nvmet_ns *ns;
+ unsigned long idx;
if (!req->p2p_client)
return;
ctrl->p2p_client = get_device(req->p2p_client);
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
- lockdep_is_held(&ctrl->subsys->lock))
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
@@ -1277,20 +1359,17 @@
if (!ctrl->changed_ns_list)
goto out_free_ctrl;
- ctrl->cqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_cq *),
- GFP_KERNEL);
- if (!ctrl->cqs)
- goto out_free_changed_ns_list;
-
ctrl->sqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_sq *),
GFP_KERNEL);
if (!ctrl->sqs)
- goto out_free_cqs;
+ goto out_free_changed_ns_list;
+
+ if (subsys->cntlid_min > subsys->cntlid_max)
+ goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
- NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1325,8 +1404,6 @@
out_free_sqs:
kfree(ctrl->sqs);
-out_free_cqs:
- kfree(ctrl->cqs);
out_free_changed_ns_list:
kfree(ctrl->changed_ns_list);
out_free_ctrl:
@@ -1354,8 +1431,8 @@
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
- kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
kfree(ctrl);
@@ -1415,7 +1492,7 @@
if (!subsys)
return ERR_PTR(-ENOMEM);
- subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
get_random_bytes(&subsys->serial, sizeof(subsys->serial));
@@ -1438,11 +1515,12 @@
kfree(subsys);
return ERR_PTR(-ENOMEM);
}
-
+ subsys->cntlid_min = NVME_CNTLID_MIN;
+ subsys->cntlid_max = NVME_CNTLID_MAX;
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
- INIT_LIST_HEAD(&subsys->namespaces);
+ xa_init(&subsys->namespaces);
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
@@ -1454,9 +1532,13 @@
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
- WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+ WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+ xa_destroy(&subsys->namespaces);
+ nvmet_passthru_subsys_free(subsys);
kfree(subsys->subsysnqn);
+ kfree_rcu(subsys->model, rcuhead);
kfree(subsys);
}
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a89..5b8ee82 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@
return entries;
}
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,8 +171,20 @@
u16 status = 0;
void *buffer;
+ if (!nvmet_check_transfer_len(req, data_len))
+ return;
+
+ if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
/* Spec requires dword aligned offsets */
if (offset & 0x3) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lpo);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out;
}
@@ -227,20 +239,35 @@
nvmet_req_complete(req, status);
}
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
+ const char model[] = "Linux";
u16 status = 0;
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+ return;
+
+ if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
goto out;
}
+ memset(id->sn, ' ', sizeof(id->sn));
+ bin2hex(id->sn, &ctrl->subsys->serial,
+ min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memset(id->fr, ' ', sizeof(id->fr));
- strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+ memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->fr, sizeof(id->fr),
+ UTS_RELEASE, strlen(UTS_RELEASE), ' ');
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -252,7 +279,7 @@
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
@@ -273,6 +300,9 @@
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
stat = nvmet_set_feat_kato(req);
@@ -296,6 +326,9 @@
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
switch (cdw10 & 0xff) {
case NVME_FEAT_KATO:
nvmet_get_feat_kato(req);
@@ -328,47 +361,22 @@
switch (cmd->common.opcode) {
case nvme_admin_set_features:
req->execute = nvmet_execute_disc_set_features;
- req->data_len = 0;
return 0;
case nvme_admin_get_features:
req->execute = nvmet_execute_disc_get_features;
- req->data_len = 0;
return 0;
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
- req->data_len = 0;
return 0;
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
- req->data_len = 0;
return 0;
case nvme_admin_get_log_page:
- req->data_len = nvmet_get_log_page_len(cmd);
-
- switch (cmd->get_log_page.lid) {
- case NVME_LOG_DISC:
- req->execute = nvmet_execute_get_disc_log_page;
- return 0;
- default:
- pr_err("unsupported get_log_page lid %d\n",
- cmd->get_log_page.lid);
- req->error_loc =
- offsetof(struct nvme_get_log_page_command, lid);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_get_log_page;
+ return 0;
case nvme_admin_identify:
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
- switch (cmd->identify.cns) {
- case NVME_ID_CNS_CTRL:
- req->execute =
- nvmet_execute_identify_disc_ctrl;
- return 0;
- default:
- pr_err("unsupported identify cns %d\n",
- cmd->identify.cns);
- req->error_loc = offsetof(struct nvme_identify, cns);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
- }
+ req->execute = nvmet_execute_disc_identify;
+ return 0;
default:
pr_err("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
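Worth spelling out the pattern behind the discovery.c rework above: parse-time req->data_len assignments are gone, and each execute handler now checks the host-supplied transfer length itself and rejects bad lid/cns values inline. A minimal sketch of the shape every handler now follows (the handler name is illustrative; the helpers are the ones used above):

	static void nvmet_execute_disc_example(struct nvmet_req *req)
	{
		u16 status = 0;

		/* completes the request itself if the transfer length is wrong */
		if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
			return;

		/* field validation moved from parse time into the handler */
		if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
			req->error_loc = offsetof(struct nvme_identify, cns);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}

		nvmet_req_complete(req, status);
	}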
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 5e47395..e62d3d0 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
if (req->cmd->prop_set.attrib & 1) {
req->error_loc =
offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@
u16 status = 0;
u64 val = 0;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
if (req->cmd->prop_get.attrib & 1) {
switch (le32_to_cpu(req->cmd->prop_get.offset)) {
case NVME_REG_CAP:
@@ -82,11 +88,9 @@
switch (cmd->fabrics.fctype) {
case nvme_fabrics_type_property_set:
- req->data_len = 0;
req->execute = nvmet_execute_prop_set;
break;
case nvme_fabrics_type_property_get:
- req->data_len = 0;
req->execute = nvmet_execute_prop_get;
break;
default:
@@ -153,6 +157,9 @@
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -191,6 +198,8 @@
goto out;
}
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
+
uuid_copy(&ctrl->hostid, &d->hostid);
status = nvmet_install_queue(ctrl, req);
@@ -199,8 +208,9 @@
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s.\n",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
+ pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
@@ -217,6 +227,9 @@
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
+ return;
+
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
status = NVME_SC_INTERNAL;
@@ -287,7 +300,6 @@
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- req->data_len = sizeof(struct nvmf_connect_data);
if (cmd->connect.qid == 0)
req->execute = nvmet_execute_admin_connect;
else
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 9b07e8c..640031c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -14,6 +14,7 @@
#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "../host/fc.h"
/* *************************** Data Structures/Defines ****************** */
@@ -21,23 +22,21 @@
#define NVMET_LS_CTX_COUNT 256
-/* for this implementation, assume small single frame rqst/rsp */
-#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
-
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;
-struct nvmet_fc_ls_iod {
- struct nvmefc_tgt_ls_req *lsreq;
+struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
+ struct nvmefc_ls_rsp *lsrsp;
struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
- struct list_head ls_list; /* tgtport->ls_list */
+ struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
struct nvmet_fc_tgtport *tgtport;
struct nvmet_fc_tgt_assoc *assoc;
+ void *hosthandle;
- u8 *rqstbuf;
- u8 *rspbuf;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
u16 rqstdatalen;
dma_addr_t rspdma;
@@ -46,6 +45,18 @@
struct work_struct work;
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
+ struct nvmefc_ls_req ls_req;
+
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+
+ int ls_error;
+ struct list_head lsreq_list; /* tgtport->ls_req_list */
+ bool req_queued;
+};
+
+
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
@@ -83,7 +94,6 @@
};
struct nvmet_fc_tgtport {
-
struct nvmet_fc_target_port fc_target_port;
struct list_head tgt_list; /* nvmet_fc_target_list */
@@ -92,9 +102,11 @@
struct nvmet_fc_ls_iod *iod;
spinlock_t lock;
- struct list_head ls_list;
+ struct list_head ls_rcv_list;
+ struct list_head ls_req_list;
struct list_head ls_busylist;
struct list_head assoc_list;
+ struct list_head host_list;
struct ida assoc_cnt;
struct nvmet_fc_port_entry *pe;
struct kref ref;
@@ -136,10 +148,21 @@
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_hostport {
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+ struct list_head host_list;
+ struct kref ref;
+ u8 invalid;
+};
+
struct nvmet_fc_tgt_assoc {
u64 association_id;
u32 a_id;
+ atomic_t terminating;
struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
@@ -227,6 +250,8 @@
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -318,6 +343,188 @@
}
+/* ********************** FC-NVME LS XMT Handling ************************* */
+
+
+static void
+__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+{
+ struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static int
+__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!tgtport->ops->ls_req)
+ return -EOPNOTSUPP;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+
+ lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
+ ret = -EFAULT;
+ goto out_puttgtport;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
+ lsreq);
+ if (ret)
+ goto out_unlink;
+
+ return 0;
+
+out_unlink:
+ lsop->ls_error = ret;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ lsop->req_queued = false;
+ list_del(&lsop->lsreq_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+out_puttgtport:
+ nvmet_fc_tgtport_put(tgtport);
+
+ return ret;
+}
+
+static int
+nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ /* don't wait for completion */
+
+ return __nvmet_fc_send_ls_req(tgtport, lsop, done);
+}
+
+static void
+nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
+
+ __nvmet_fc_finish_ls_req(lsop);
+
+ /* fc-nvme target doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme target is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme host, so the target may never get a
+ * response even if it tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme host
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ int ret;
+
+ /*
+ * If ls_req is NULL or there is no hosthandle, it's an older lldd
+ * and not sending the LS is normal. Otherwise, send unless the hostport has
+ * already been invalidated by the lldd.
+ */
+ if (!tgtport->ops->ls_req || !assoc->hostport ||
+ assoc->hostport->invalid)
+ return;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(tgtport->dev,
+ "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ return;
+ }
+
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (tgtport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
+
+ lsop->tgtport = tgtport;
+ lsop->hosthandle = assoc->hostport->hosthandle;
+
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ assoc->association_id);
+
+ ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
+ nvmet_fc_disconnect_assoc_done);
+ if (ret) {
+ dev_info(tgtport->dev,
+ "{%d:%d} XMT Disconnect Association failed: %d\n",
+ tgtport->fc_target_port.port_num, assoc->a_id, ret);
+ kfree(lsop);
+ }
+}
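For readability, a sketch of the single allocation that nvmet_fc_xmt_disconnect_assoc() carves up above (layout inferred from the pointer arithmetic; sizes follow the struct definitions and the LLDD's lsrqst_priv_sz):

	/*
	 *  lsop ------------------> struct nvmet_fc_ls_req_op
	 *  &lsop[1] --------------> struct fcnvme_ls_disconnect_assoc_rqst (discon_rqst)
	 *  &discon_rqst[1] -------> struct fcnvme_ls_disconnect_assoc_acc  (discon_acc)
	 *  &discon_acc[1] --------> lsrqst_priv_sz bytes of LLDD private data
	 *                           (lsreq->private, only when lsrqst_priv_sz != 0)
	 */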
+
+
/* *********************** FC-NVME Port Management ************************ */
@@ -337,17 +544,18 @@
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
iod->tgtport = tgtport;
- list_add_tail(&iod->ls_list, &tgtport->ls_list);
+ list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
- iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
- GFP_KERNEL);
+ iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
if (!iod->rqstbuf)
goto out_fail;
- iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+ iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE,
+ sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
goto out_fail;
@@ -357,12 +565,12 @@
out_fail:
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
for (iod--, i--; i >= 0; iod--, i--) {
fc_dma_unmap_single(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(iod);
@@ -378,10 +586,10 @@
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
fc_dma_unmap_single(tgtport->dev,
- iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+ iod->rspdma, sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(tgtport->iod);
}
@@ -393,10 +601,10 @@
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- iod = list_first_entry_or_null(&tgtport->ls_list,
- struct nvmet_fc_ls_iod, ls_list);
+ iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
+ struct nvmet_fc_ls_iod, ls_rcv_list);
if (iod)
- list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+ list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
spin_unlock_irqrestore(&tgtport->lock, flags);
return iod;
}
@@ -409,7 +617,7 @@
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- list_move(&iod->ls_list, &tgtport->ls_list);
+ list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -678,31 +886,33 @@
struct nvmet_fc_fcp_iod *fod = queue->fod;
struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
- int i, writedataactive;
+ int i;
bool disconnect;
disconnect = atomic_xchg(&queue->connected, 0);
+ /* if not connected, nothing to do */
+ if (!disconnect)
+ return;
+
spin_lock_irqsave(&queue->qlock, flags);
- /* about outstanding io's */
+ /* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
fod->abort = true;
- writedataactive = fod->writedataactive;
- spin_unlock(&fod->flock);
/*
* only call lldd abort routine if waiting for
* writedata. other outstanding ops should finish
* on their own.
*/
- if (writedataactive) {
- spin_lock(&fod->flock);
+ if (fod->writedataactive) {
fod->aborted = true;
spin_unlock(&fod->flock);
tgtport->ops->fcp_abort(
&tgtport->fc_target_port, fod->fcpreq);
- }
+ } else
+ spin_unlock(&fod->flock);
}
}
@@ -742,8 +952,7 @@
flush_workqueue(queue->work_q);
- if (disconnect)
- nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_sq_destroy(&queue->nvme_sq);
nvmet_fc_tgt_q_put(queue);
}
@@ -778,6 +987,102 @@
}
static void
+nvmet_fc_hostport_free(struct kref *ref)
+{
+ struct nvmet_fc_hostport *hostport =
+ container_of(ref, struct nvmet_fc_hostport, ref);
+ struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&hostport->host_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ if (tgtport->ops->host_release && hostport->invalid)
+ tgtport->ops->host_release(hostport->hosthandle);
+ kfree(hostport);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
+{
+ kref_put(&hostport->ref, nvmet_fc_hostport_free);
+}
+
+static int
+nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
+{
+ return kref_get_unless_zero(&hostport->ref);
+}
+
+static void
+nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
+{
+ /* if LLDD not implemented, leave as NULL */
+ if (!hostport || !hostport->hosthandle)
+ return;
+
+ nvmet_fc_hostport_put(hostport);
+}
+
+static struct nvmet_fc_hostport *
+nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *newhost, *host, *match = NULL;
+ unsigned long flags;
+
+ /* if LLDD not implemented, leave as NULL */
+ if (!hosthandle)
+ return NULL;
+
+ /* take reference for what will be the newly allocated hostport */
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return ERR_PTR(-EINVAL);
+
+ newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
+ if (!newhost) {
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host)) {
+ match = host;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* no allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return (match) ? match : ERR_PTR(-ENOMEM);
+ }
+
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+ INIT_LIST_HEAD(&newhost->host_list);
+ kref_init(&newhost->ref);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host)) {
+ match = host;
+ break;
+ }
+ }
+ }
+ if (match) {
+ kfree(newhost);
+ newhost = NULL;
+ /* releasing allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ } else
+ list_add_tail(&newhost->host_list, &tgtport->host_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return (match) ? match : newhost;
+}
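nvmet_fc_alloc_hostport() above uses the familiar "allocate outside the lock, dedupe under the lock" idiom so the GFP_KERNEL allocation never happens with the tgtport spinlock held. A stripped-down sketch of the idiom, with refcounting omitted (struct item and find_or_add are illustrative names, not part of the patch):

	struct item {
		struct list_head node;
		u64 key;
	};

	static struct item *find_or_add(struct list_head *head, spinlock_t *lock,
			u64 key)
	{
		struct item *new, *cur, *match = NULL;
		unsigned long flags;

		new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep: no lock held */
		if (!new)
			return ERR_PTR(-ENOMEM);
		new->key = key;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry(cur, head, node) {
			if (cur->key == key) {
				match = cur;
				break;
			}
		}
		if (!match)
			list_add_tail(&new->node, head);
		spin_unlock_irqrestore(lock, flags);

		if (match)
			kfree(new); /* lost the race; use the existing entry */
		return match ? match : new;
	}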
+
+static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
@@ -788,7 +1093,7 @@
}
static struct nvmet_fc_tgt_assoc *
-nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
unsigned long flags;
@@ -805,13 +1110,18 @@
goto out_free_assoc;
if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida_put;
+ goto out_ida;
+
+ assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
+ if (IS_ERR(assoc->hostport))
+ goto out_put;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ atomic_set(&assoc->terminating, 0);
while (needrandom) {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
@@ -819,11 +1129,12 @@
spin_lock_irqsave(&tgtport->lock, flags);
needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
if (ran == tmpassoc->association_id) {
needrandom = true;
break;
}
+ }
if (!needrandom) {
assoc->association_id = ran;
list_add_tail(&assoc->a_list, &tgtport->assoc_list);
@@ -833,7 +1144,9 @@
return assoc;
-out_ida_put:
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+out_ida:
ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
@@ -846,12 +1159,24 @@
struct nvmet_fc_tgt_assoc *assoc =
container_of(ref, struct nvmet_fc_tgt_assoc, ref);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
list_del(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+ if (oldls)
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association freed\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
kfree(assoc);
nvmet_fc_tgtport_put(tgtport);
}
@@ -874,7 +1199,13 @@
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
- int i;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
spin_lock_irqsave(&tgtport->lock, flags);
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
@@ -890,6 +1221,10 @@
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+
nvmet_fc_tgt_a_put(assoc);
}
@@ -905,7 +1240,8 @@
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
- nvmet_fc_tgt_a_get(assoc);
+ if (!nvmet_fc_tgt_a_get(assoc))
+ ret = NULL;
break;
}
}
@@ -1048,16 +1384,21 @@
newrec->fc_target_port.node_name = pinfo->node_name;
newrec->fc_target_port.port_name = pinfo->port_name;
- newrec->fc_target_port.private = &newrec[1];
+ if (template->target_priv_sz)
+ newrec->fc_target_port.private = &newrec[1];
+ else
+ newrec->fc_target_port.private = NULL;
newrec->fc_target_port.port_id = pinfo->port_id;
newrec->fc_target_port.port_num = idx;
INIT_LIST_HEAD(&newrec->tgt_list);
newrec->dev = dev;
newrec->ops = template;
spin_lock_init(&newrec->lock);
- INIT_LIST_HEAD(&newrec->ls_list);
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
+ INIT_LIST_HEAD(&newrec->ls_req_list);
INIT_LIST_HEAD(&newrec->ls_busylist);
INIT_LIST_HEAD(&newrec->assoc_list);
+ INIT_LIST_HEAD(&newrec->host_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
@@ -1141,11 +1482,72 @@
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
+/**
+ * nvmet_fc_invalidate_host - transport entry point called by an LLDD
+ * to remove references to a hosthandle for LS's.
+ *
+ * The nvmet-fc layer ensures that any references to the hosthandle
+ * on the targetport are forgotten (set to NULL). The LLDD will
+ * typically call this when a login with a remote host port has been
+ * lost, thus LS's for the remote host port are no longer possible.
+ *
+ * If an LS request is outstanding to the targetport/hosthandle (or
+ * issued concurrently with the call to invalidate the host), the
+ * LLDD is responsible for terminating/aborting the LS and completing
+ * the LS request. It is recommended that these terminations/aborts
+ * occur after calling to invalidate the host handle to avoid additional
+ * retries by the nvmet-fc transport. The nvmet-fc transport may
+ * continue to reference the host handle while it cleans up outstanding
+ * NVME associations. The nvmet-fc transport will call the
+ * ops->host_release() callback to notify the LLDD that all references
+ * are complete and the related host handle can be recovered.
+ * Note: if there are no references, the callback may be called before
+ * the invalidate host call returns.
+ *
+ * @target_port: pointer to the (registered) target port that a prior
+ * LS was received on and which supplied the transport the
+ * hosthandle.
+ * @hosthandle: the handle (pointer) that represents the host port
+ * that no longer has connectivity and that LS's should
+ * no longer be directed to.
+ */
+void
+nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ void *hosthandle)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ struct nvmet_fc_tgt_assoc *assoc, *next;
+ unsigned long flags;
+ bool noassoc = true;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_for_each_entry_safe(assoc, next,
+ &tgtport->assoc_list, a_list) {
+ if (!assoc->hostport ||
+ assoc->hostport->hosthandle != hosthandle)
+ continue;
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+ if (!schedule_work(&assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ /* if there's nothing to wait for - call the callback */
+ if (noassoc && tgtport->ops->host_release)
+ tgtport->ops->host_release(hosthandle);
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
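To make the new kernel-doc above concrete, a hedged sketch of the LLDD side; struct lldd_rport and the function names are hypothetical, only nvmet_fc_invalidate_host() and the host_release() callback come from this patch:

	struct lldd_rport {
		struct nvmet_fc_target_port *tgtport;
		void *hosthandle;
	};

	static void lldd_remote_port_gone(struct lldd_rport *rport)
	{
		/* connectivity lost: stop nvmet-fc from sending LS's to this host */
		nvmet_fc_invalidate_host(rport->tgtport, rport->hosthandle);
		/* hosthandle must stay valid until host_release() is invoked */
	}

	static void lldd_host_release(void *hosthandle)
	{
		struct lldd_rport *rport = hosthandle;

		/* nvmet-fc holds no more references; the handle may be freed now */
		kfree(rport);
	}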
+
/*
* nvmet layer has called to terminate an association
*/
@@ -1181,6 +1583,7 @@
if (found_ctrl) {
if (!schedule_work(&assoc->del_work))
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1211,6 +1614,13 @@
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+ * exist yet. And even if they did, it's worthwhile to just let
+ * them finish and targetport ref counting will clean things up.
+ */
+
nvmet_fc_tgtport_put(tgtport);
return 0;
@@ -1218,113 +1628,15 @@
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
-/* *********************** FC-NVME LS Handling **************************** */
+/* ********************** FC-NVME LS RCV Handling ************************* */
static void
-nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
-{
- struct fcnvme_ls_acc_hdr *acc = buf;
-
- acc->w0.ls_cmd = ls_cmd;
- acc->desc_list_len = desc_len;
- acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
- acc->rqst.desc_len =
- fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
- acc->rqst.w0.ls_cmd = rqst_ls_cmd;
-}
-
-static int
-nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
- u8 reason, u8 explanation, u8 vendor)
-{
- struct fcnvme_ls_rjt *rjt = buf;
-
- nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
- fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
- ls_cmd);
- rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
- rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
- rjt->rjt.reason_code = reason;
- rjt->rjt.reason_explanation = explanation;
- rjt->rjt.vendor = vendor;
-
- return sizeof(struct fcnvme_ls_rjt);
-}
-
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_CR_ASSOC_LEN = 1,
- VERR_CR_ASSOC_RQST_LEN = 2,
- VERR_CR_ASSOC_CMD = 3,
- VERR_CR_ASSOC_CMD_LEN = 4,
- VERR_ERSP_RATIO = 5,
- VERR_ASSOC_ALLOC_FAIL = 6,
- VERR_QUEUE_ALLOC_FAIL = 7,
- VERR_CR_CONN_LEN = 8,
- VERR_CR_CONN_RQST_LEN = 9,
- VERR_ASSOC_ID = 10,
- VERR_ASSOC_ID_LEN = 11,
- VERR_NO_ASSOC = 12,
- VERR_CONN_ID = 13,
- VERR_CONN_ID_LEN = 14,
- VERR_NO_CONN = 15,
- VERR_CR_CONN_CMD = 16,
- VERR_CR_CONN_CMD_LEN = 17,
- VERR_DISCONN_LEN = 18,
- VERR_DISCONN_RQST_LEN = 19,
- VERR_DISCONN_CMD = 20,
- VERR_DISCONN_CMD_LEN = 21,
- VERR_DISCONN_SCOPE = 22,
- VERR_RS_LEN = 23,
- VERR_RS_RQST_LEN = 24,
- VERR_RS_CMD = 25,
- VERR_RS_CMD_LEN = 26,
- VERR_RS_RCTL = 27,
- VERR_RS_RO = 28,
-};
-
-static char *validation_errors[] = {
- "OK",
- "Bad CR_ASSOC Length",
- "Bad CR_ASSOC Rqst Length",
- "Not CR_ASSOC Cmd",
- "Bad CR_ASSOC Cmd Length",
- "Bad Ersp Ratio",
- "Association Allocation Failed",
- "Queue Allocation Failed",
- "Bad CR_CONN Length",
- "Bad CR_CONN Rqst Length",
- "Not Association ID",
- "Bad Association ID Length",
- "No Association",
- "Not Connection ID",
- "Bad Connection ID Length",
- "No Connection",
- "Not CR_CONN Cmd",
- "Bad CR_CONN Cmd Length",
- "Bad DISCONN Length",
- "Bad DISCONN Rqst Length",
- "Not DISCONN Cmd",
- "Bad DISCONN Cmd Length",
- "Bad Disconnect Scope",
- "Bad RS Length",
- "Bad RS Rqst Length",
- "Not RS Cmd",
- "Bad RS Cmd Length",
- "Bad RS R_CTL",
- "Bad RS Relative Offset",
-};
-
-static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_assoc_rqst *rqst =
- (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_assoc_acc *acc =
- (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
+ struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1356,7 +1668,8 @@
else {
/* new association w/ admin queue */
- iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+ iod->assoc = nvmet_fc_alloc_target_assoc(
+ tgtport, iod->hosthandle);
if (!iod->assoc)
ret = VERR_ASSOC_ALLOC_FAIL;
else {
@@ -1371,8 +1684,8 @@
dev_err(tgtport->dev,
"Create Association LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
return;
@@ -1382,11 +1695,15 @@
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
+ dev_info(tgtport->dev,
+ "{%d:%d} Association created\n",
+ tgtport->fc_target_port.port_num, iod->assoc->a_id);
+
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_cr_assoc_acc)),
FCNVME_LS_CREATE_ASSOCIATION);
@@ -1407,10 +1724,8 @@
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_conn_rqst *rqst =
- (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_conn_acc *acc =
- (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
+ struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1462,8 +1777,8 @@
dev_err(tgtport->dev,
"Create Connection LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
FCNVME_RJT_RC_LOGIC,
@@ -1477,9 +1792,9 @@
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
FCNVME_LS_CREATE_CONNECTION);
acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
@@ -1491,43 +1806,28 @@
be16_to_cpu(rqst->connect_cmd.qid)));
}
-static void
+/*
+ * Returns true if the LS response is to be transmitted
+ * Returns false if the LS response is to be delayed
+ */
+static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_disconnect_rqst *rqst =
- (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
- struct fcnvme_ls_disconnect_acc *acc =
- (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_assoc *assoc;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &iod->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &iod->rspbuf->rsp_dis_assoc;
+ struct nvmet_fc_tgt_assoc *assoc = NULL;
+ struct nvmet_fc_ls_iod *oldls = NULL;
+ unsigned long flags;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
- ret = VERR_DISCONN_LEN;
- else if (rqst->desc_list_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_rqst)))
- ret = VERR_DISCONN_RQST_LEN;
- else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
- ret = VERR_ASSOC_ID;
- else if (rqst->associd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id)))
- ret = VERR_ASSOC_ID_LEN;
- else if (rqst->discon_cmd.desc_tag !=
- cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
- ret = VERR_DISCONN_CMD;
- else if (rqst->discon_cmd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd)))
- ret = VERR_DISCONN_CMD_LEN;
- else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
- (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
- ret = VERR_DISCONN_SCOPE;
- else {
- /* match an active association */
+ ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association - takes an assoc ref if !NULL */
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
@@ -1535,34 +1835,63 @@
ret = VERR_NO_ASSOC;
}
- if (ret) {
+ if (ret || !assoc) {
dev_err(tgtport->dev,
"Disconnect LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
- (ret == VERR_NO_CONN) ?
- FCNVME_RJT_RC_INV_CONN :
- FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
- return;
+ return true;
}
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_acc)),
- FCNVME_LS_DISCONNECT);
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(iod->assoc);
+ nvmet_fc_tgt_a_put(assoc);
- nvmet_fc_delete_target_assoc(iod->assoc);
+ /*
+ * The rules for LS responses say the response cannot
+ * go back until ABTS's have been sent for all outstanding
+ * I/O and a Disconnect Association LS has been sent.
+ * So... save off the Disconnect LS to send the response
+ * later. If there was a prior LS already saved, replace
+ * it with the newer one and send a can't perform reject
+ * on the older one.
+ */
+ spin_lock_irqsave(&tgtport->lock, flags);
+ oldls = assoc->rcv_disconn;
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_delete_target_assoc(assoc);
+
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+ "received\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*iod->rspbuf),
+ /* ok to use rqst, LS is same */
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
+ return false;
}
@@ -1574,13 +1903,13 @@
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
static void
-nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
- struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+ struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
struct nvmet_fc_tgtport *tgtport = iod->tgtport;
fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
nvmet_fc_free_ls_iod(tgtport, iod);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1592,11 +1921,11 @@
int ret;
fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
- ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
if (ret)
- nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+ nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
/*
@@ -1606,15 +1935,15 @@
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_rqst_w0 *w0 =
- (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+ struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
+ bool sendrsp = true;
- iod->lsreq->nvmet_fc_private = iod;
- iod->lsreq->rspbuf = iod->rspbuf;
- iod->lsreq->rspdma = iod->rspdma;
- iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+ iod->lsrsp->nvme_fc_private = iod;
+ iod->lsrsp->rspbuf = iod->rspbuf;
+ iod->lsrsp->rspdma = iod->rspdma;
+ iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
/* Be preventative. handlers will later set to valid length */
- iod->lsreq->rsplen = 0;
+ iod->lsrsp->rsplen = 0;
iod->assoc = NULL;
@@ -1632,17 +1961,18 @@
/* Creates an IO Queue/Connection */
nvmet_fc_ls_create_connection(tgtport, iod);
break;
- case FCNVME_LS_DISCONNECT:
+ case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
- nvmet_fc_ls_disconnect(tgtport, iod);
+ sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
break;
default:
- iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
+ sizeof(*iod->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
}
- nvmet_fc_xmt_ls_rsp(tgtport, iod);
+ if (sendrsp)
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
@@ -1671,35 +2001,53 @@
*
* @target_port: pointer to the (registered) target port the LS was
* received on.
- * @lsreq: pointer to a lsreq request structure to be used to reference
+ * @lsrsp: pointer to a lsrsp structure to be used to reference
* the exchange corresponding to the LS.
* @lsreqbuf: pointer to the buffer containing the LS Request
* @lsreqbuf_len: length, in bytes, of the received LS request
*/
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *lsrsp,
void *lsreqbuf, u32 lsreqbuf_len)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
struct nvmet_fc_ls_iod *iod;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
- if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: payload too large (%d)\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "",
+ lsreqbuf_len);
return -E2BIG;
+ }
- if (!nvmet_fc_tgtport_get(tgtport))
+ if (!nvmet_fc_tgtport_get(tgtport)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: target deleting\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN;
+ }
iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: context allocation failed\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport);
return -ENOENT;
}
- iod->lsreq = lsreq;
+ iod->lsrsp = lsrsp;
iod->fcpreq = NULL;
memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
iod->rqstdatalen = lsreqbuf_len;
+ iod->hosthandle = hosthandle;
schedule_work(&iod->work);
@@ -2015,7 +2363,7 @@
}
/* data transfer complete, resume with nvmet layer */
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2225,7 +2573,7 @@
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
- nvmet_req_execute(&fod->req);
+ fod->req.execute(&fod->req);
return;
transport_error:
@@ -2293,7 +2641,7 @@
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
- (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+ (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
(cmdiu->fc_id != NVME_CMD_FC_ID) ||
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b50b53d..3da067a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -43,6 +43,17 @@
{ NVMF_OPT_ERR, NULL }
};
+static int fcloop_verify_addr(substring_t *s)
+{
+ size_t blen = s->to - s->from + 1;
+
+ if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
+ strncmp(s->from, "0x", 2))
+ return -EINVAL;
+
+ return 0;
+}
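For illustration, the option values the new fcloop_verify_addr() check accepts and rejects, assuming NVME_FC_TRADDR_HEXNAMELEN is 16 (a full 64-bit WWNN/WWPN):

	/* accepted: "0x" prefix plus exactly 16 hex digits               */
	/*     wwnn=0x20000090fadd3a58                                    */
	/* rejected: missing "0x" prefix                                  */
	/*     wwnn=20000090fadd3a58                                      */
	/* rejected: short value match_u64() alone would have parsed before */
	/*     wwnn=0x90fadd3a58                                          */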
+
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
const char *buf)
@@ -64,14 +75,16 @@
opts->mask |= token;
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->wwnn = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -92,14 +105,16 @@
opts->fcaddr = token;
break;
case NVMF_OPT_LPWWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->lpwwnn = token64;
break;
case NVMF_OPT_LPWWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -141,14 +156,16 @@
token = match_token(p, opt_tokens, args);
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
*nname = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -198,17 +215,23 @@
};
struct fcloop_rport {
- struct nvme_fc_remote_port *remoteport;
- struct nvmet_fc_target_port *targetport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_tport {
- struct nvmet_fc_target_port *targetport;
- struct nvme_fc_remote_port *remoteport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_nport {
@@ -224,11 +247,11 @@
};
struct fcloop_lsreq {
- struct fcloop_tport *tport;
struct nvmefc_ls_req *lsreq;
- struct work_struct work;
- struct nvmefc_tgt_ls_req tgt_ls_req;
+ struct nvmefc_ls_rsp ls_rsp;
+ int lsdir; /* H2T or T2H */
int status;
+ struct list_head ls_list; /* fcloop_rport->ls_list */
};
struct fcloop_rscn {
@@ -265,9 +288,9 @@
};
static inline struct fcloop_lsreq *
-tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
- return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+ return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}
static inline struct fcloop_fcpreq *
@@ -292,25 +315,36 @@
{
}
-
-/*
- * Transmit of LS RSP done (e.g. buffers all set). call back up
- * initiator "done" flows.
- */
static void
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+fcloop_rport_lsrqst_work(struct work_struct *work)
{
- struct fcloop_lsreq *tls_req =
- container_of(work, struct fcloop_lsreq, work);
- struct fcloop_tport *tport = tls_req->tport;
- struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport =
+ container_of(work, struct fcloop_rport, ls_work);
+ struct fcloop_lsreq *tls_req;
- if (!tport || tport->remoteport)
- lsreq->done(lsreq, tls_req->status);
+ spin_lock(&rport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&rport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&rport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&rport->lock);
+ }
+ spin_unlock(&rport->lock);
}
static int
-fcloop_ls_req(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
@@ -319,40 +353,145 @@
int ret = 0;
tls_req->lsreq = lsreq;
- INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+ INIT_LIST_HEAD(&tls_req->ls_list);
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
- tls_req->tport = NULL;
- schedule_work(&tls_req->work);
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
return ret;
}
tls_req->status = 0;
- tls_req->tport = rport->targetport->private;
- ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
+ &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_tport *tport = targetport->private;
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
+ struct fcloop_rport *rport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+
+ lsrsp->done(lsrsp);
+
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_tport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_tport *tport =
+ container_of(work, struct fcloop_tport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&tport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&tport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&tport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&tport->lock);
+ }
+ spin_unlock(&tport->lock);
+}
+
+static int
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_tport *tport = targetport->private;
+ int ret = 0;
+
+ /*
+ * hosthandle should be the dst.rport value.
+ * hosthandle ignored as fcloop currently is
+ * 1:1 tgtport vs remoteport
+ */
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
lsreq->rqstaddr, lsreq->rqstlen);
return ret;
}
static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
- struct nvmefc_tgt_ls_req *tgt_lsreq)
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp)
{
- struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
- memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
- ((lsreq->rsplen < tgt_lsreq->rsplen) ?
- lsreq->rsplen : tgt_lsreq->rsplen));
- tgt_lsreq->done(tgt_lsreq);
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+ lsrsp->done(lsrsp);
- schedule_work(&tls_req->work);
+ if (targetport) {
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+ schedule_work(&tport->ls_work);
+ }
return 0;
}
+static void
+fcloop_t2h_host_release(void *hosthandle)
+{
+ /* host handle ignored for now */
+}
+
/*
* Simulate reception of RSCN and converting it to a initiator transport
* call to rescan a remote port.
@@ -673,7 +812,7 @@
break;
/* Fall-Thru to RSP handling */
- /* FALLTHRU */
+ fallthrough;
case NVMET_FCOP_RSP:
if (fcpreq) {
@@ -738,13 +877,19 @@
}
static void
-fcloop_ls_abort(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
}
static void
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
void *hw_queue_handle,
@@ -834,6 +979,7 @@
{
struct fcloop_rport *rport = remoteport->private;
+ flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport);
}
@@ -842,6 +988,7 @@
{
struct fcloop_tport *tport = targetport->private;
+ flush_work(&tport->ls_work);
fcloop_nport_put(tport->nport);
}
@@ -854,10 +1001,11 @@
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
.delete_queue = fcloop_delete_queue,
- .ls_req = fcloop_ls_req,
+ .ls_req = fcloop_h2t_ls_req,
.fcp_io = fcloop_fcp_req,
- .ls_abort = fcloop_ls_abort,
+ .ls_abort = fcloop_h2t_ls_abort,
.fcp_abort = fcloop_fcp_abort,
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -871,11 +1019,14 @@
static struct nvmet_fc_target_template tgttemplate = {
.targetport_delete = fcloop_targetport_delete,
- .xmt_ls_rsp = fcloop_xmt_ls_rsp,
+ .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
.fcp_op = fcloop_fcp_op,
.fcp_abort = fcloop_tgt_fcp_abort,
.fcp_req_release = fcloop_fcp_req_release,
.discovery_event = fcloop_tgt_discovery_evt,
+ .ls_req = fcloop_t2h_ls_req,
+ .ls_abort = fcloop_t2h_ls_abort,
+ .host_release = fcloop_t2h_host_release,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -884,6 +1035,7 @@
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
@@ -1135,6 +1287,9 @@
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ spin_lock_init(&rport->lock);
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+ INIT_LIST_HEAD(&rport->ls_list);
return count;
}
@@ -1230,6 +1385,9 @@
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ spin_lock_init(&tport->lock);
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
+ INIT_LIST_HEAD(&tport->ls_list);
return count;
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d8..6a9626f 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -47,6 +47,22 @@
id->nows = to0based(ql->io_opt / ql->logical_block_size);
}
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+ struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+ if (bi) {
+ ns->metadata_size = bi->tuple_size;
+ if (bi->profile == &t10_pi_type1_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+ else if (bi->profile == &t10_pi_type3_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ else
+ /* Unsupported metadata type */
+ ns->metadata_size = 0;
+ }
+}
+
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
@@ -64,6 +80,12 @@
}
ns->size = i_size_read(ns->bdev->bd_inode);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ns->pi_type = 0;
+ ns->metadata_size = 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ nvmet_bdev_ns_enable_integrity(ns);
+
return 0;
}
@@ -75,6 +97,11 @@
}
}
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
+{
+ ns->size = i_size_read(ns->bdev->bd_inode);
+}
+
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -112,7 +139,6 @@
req->error_loc = offsetof(struct nvme_rw_command, nsid);
break;
case BLK_STS_IOERR:
- /* fallthru */
default:
status = NVME_SC_INTERNAL | NVME_SC_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
@@ -142,13 +168,75 @@
bio_put(bio);
}
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct block_device *bdev = req->ns->bdev;
+ int rc;
+ size_t resid, len;
+
+ bi = bdev_get_integrity(bdev);
+ if (unlikely(!bi)) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+ if (IS_ERR(bip)) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return PTR_ERR(bip);
+ }
+
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
+
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (unlikely(rc != len)) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
+ return -ENOMEM;
+ }
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
+ }
+ sg_miter_stop(miter);
+
+ return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
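One detail in nvmet_bdev_alloc_bip() above that is easy to misread: bip_set_seed() takes the start in protection-interval units, not 512-byte sectors. A worked example under the assumption of a 4096-byte integrity interval:

	/* illustrative numbers only */
	sector_t sector = 16;             /* bio start, in 512-byte sectors */
	unsigned char interval_exp = 12;  /* log2 of the 4096-byte interval */
	sector_t seed = sector >> (interval_exp - SECTOR_SHIFT); /* 16 >> 3 == 2 */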
+
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
+ struct blk_plug plug;
sector_t sector;
- int op, op_flags = 0, i;
+ int op, i, rc;
+ struct sg_mapping_iter prot_miter;
+ unsigned int iter_flags;
+ unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
+
+ if (!nvmet_check_transfer_len(req, total_len))
+ return;
if (!req->sg_cnt) {
nvmet_req_complete(req, 0);
@@ -156,21 +244,21 @@
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE;
- op_flags = REQ_SYNC | REQ_IDLE;
+ op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op_flags |= REQ_FUA;
+ op |= REQ_FUA;
+ iter_flags = SG_MITER_TO_SG;
} else {
op = REQ_OP_READ;
+ iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op_flags |= REQ_NOMERGE;
+ op |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
@@ -180,17 +268,31 @@
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
+
+ blk_start_plug(&plug);
+ if (req->metadata_len)
+ sg_miter_start(&prot_miter, req->metadata_sg,
+ req->metadata_sg_cnt, iter_flags);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
struct bio *prev = bio;
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio,
+ &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -200,13 +302,25 @@
sg_cnt--;
}
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
submit_bio(bio);
+ blk_finish_plug(&plug);
}
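The open-coded "slba << (blksize_shift - 9)" conversions in this file are replaced by nvmet_lba_to_sect(); the helper itself is added to nvmet.h elsewhere in this patch. Its presumed shape, for reference (treat this as an assumption, not a quote of the patch):

	static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
	{
		/* convert a namespace LBA to a 512-byte block-layer sector */
		return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
	}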
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
@@ -218,7 +332,7 @@
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
@@ -230,7 +344,7 @@
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
@@ -261,12 +375,10 @@
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- if (status) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- } else {
+ if (status)
+ bio_io_error(bio);
+ else
submit_bio(bio);
- }
} else {
nvmet_req_complete(req, status);
}
@@ -274,6 +386,9 @@
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ return;
+
switch (le32_to_cpu(req->cmd->dsm.attributes)) {
case NVME_DSMGMT_AD:
nvmet_bdev_execute_discard(req);
@@ -295,8 +410,10 @@
sector_t nr_sector;
int ret;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
@@ -319,20 +436,17 @@
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
- req->data_len = nvmet_rw_len(req);
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ req->metadata_len = nvmet_rw_metadata_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_bdev_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6ca17a0..c81690b 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -8,11 +8,24 @@
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include "nvmet.h"
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+ struct kstat stat;
+ int ret;
+
+ ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
+ AT_STATX_FORCE_SYNC);
+ if (!ret)
+ ns->size = stat.size;
+ return ret;
+}
+
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
@@ -30,7 +43,6 @@
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
- struct kstat stat;
int ret;
if (!ns->buffered_io)
@@ -45,12 +57,10 @@
return ret;
}
- ret = vfs_getattr(&ns->file->f_path,
- &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+ ret = nvmet_file_ns_revalidate(ns);
if (ret)
goto err;
- ns->size = stat.size;
/*
* i_blkbits can be greater than the universally accepted upper bound,
* so make sure we export a sane namespace lba_shift.
@@ -128,7 +138,7 @@
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- if (unlikely(ret != req->data_len))
+ if (unlikely(ret != req->transfer_len))
status = errno_to_nvme_status(req, ret);
nvmet_req_complete(req, status);
}
@@ -148,7 +158,7 @@
is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
- if (unlikely(pos + req->data_len > req->ns->size)) {
+ if (unlikely(pos + req->transfer_len > req->ns->size)) {
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return true;
}
@@ -175,7 +185,7 @@
nr_bvec--;
}
- if (WARN_ON_ONCE(total_len != req->data_len)) {
+ if (WARN_ON_ONCE(total_len != req->transfer_len)) {
ret = -EIO;
goto complete;
}
@@ -234,6 +244,9 @@
{
ssize_t nr_bvec = req->sg_cnt;
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
return;
@@ -254,7 +267,8 @@
if (req->ns->buffered_io) {
if (likely(!req->f.mpool_alloc) &&
- nvmet_file_execute_io(req, IOCB_NOWAIT))
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
return;
nvmet_file_submit_buffered_io(req);
} else
@@ -275,6 +289,8 @@
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
}
@@ -333,6 +349,8 @@
static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
+ if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+ return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
}
@@ -361,6 +379,8 @@
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
}
@@ -373,20 +393,15 @@
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
- req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_file_execute_flush;
- req->data_len = 0;
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_file_execute_dsm;
- req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
- sizeof(struct nvme_dsm_range);
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_file_execute_write_zeroes;
- req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f657a12..ff3258c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,7 +36,6 @@
struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl;
- struct nvmet_ctrl *target_ctrl;
struct nvmet_port *port;
};
@@ -76,8 +75,7 @@
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- nvme_cleanup_cmd(req);
- sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+ sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
nvme_complete_rq(req);
}
@@ -102,22 +100,23 @@
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+ if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+ cqe->command_id))) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on queue %d not found\n",
+ "got bad command_id %#x on queue %d\n",
cqe->command_id, nvme_loop_queue_idx(queue));
return;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ nvme_loop_complete_rq(rq);
}
}
@@ -126,7 +125,7 @@
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- nvmet_req_execute(&iod->req);
+ iod->req.execute(&iod->req);
}
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -157,7 +156,7 @@
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+ iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
nvme_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
@@ -343,9 +342,9 @@
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -448,15 +447,16 @@
{
struct nvme_loop_ctrl *ctrl =
container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
- bool changed;
int ret;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure should never happen */
- WARN_ON_ONCE(1);
+ if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ /* state change failure for non-deleted ctrl? */
+ WARN_ON_ONCE(1);
return;
}
@@ -475,8 +475,8 @@
blk_mq_update_nr_hw_queues(&ctrl->tag_set,
ctrl->ctrl.queue_count - 1);
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
nvme_start_ctrl(&ctrl->ctrl);
@@ -489,7 +489,6 @@
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
@@ -517,10 +516,10 @@
ctrl->tag_set.ops = &nvme_loop_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->tag_set.driver_data = ctrl;
ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
@@ -572,7 +571,6 @@
struct nvmf_ctrl_options *opts)
{
struct nvme_loop_ctrl *ctrl;
- bool changed;
int ret;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -585,8 +583,13 @@
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
- if (ret)
- goto out_put_ctrl;
+ if (ret) {
+ kfree(ctrl);
+ goto out;
+ }
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ WARN_ON_ONCE(1);
ret = -ENOMEM;
@@ -622,8 +625,8 @@
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
mutex_lock(&nvme_loop_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
@@ -640,8 +643,7 @@
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
-out_put_ctrl:
- nvme_put_ctrl(&ctrl->ctrl);
+out:
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
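
The completion path change in loop.c above replaces an open-coded admin-queue check with nvme_is_aen_req(). As a rough editorial sketch (not copied from nvme.h, and not part of this patch), the condition that helper stands in for is the one the removed lines spelled out: AEN completions only appear on the admin queue (qid 0) and use command_ids at or above the admin blk-mq depth reserved for regular requests.

#include <stdbool.h>
#include <stdint.h>

/*
 * Editorial sketch: reconstructed from the removed open-coded check,
 * not taken verbatim from the kernel headers.
 */
static inline bool is_aen_completion(unsigned int qid, uint16_t command_id,
				     uint16_t aq_blk_mq_depth)
{
	/* only the admin queue carries AENs, in the reserved tag range */
	return qid == 0 && command_id >= aq_blk_mq_depth;
}
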
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd..4bf6d21 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -19,10 +19,14 @@
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
+#include <linux/t10-pi.h>
+
+#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
+#define NVMET_DEFAULT_CTRL_MODEL "Linux"
/*
* Supported optional AENs:
@@ -50,7 +54,6 @@
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
struct nvmet_ns {
- struct list_head dev_link;
struct percpu_ref ref;
struct block_device *bdev;
struct file *file;
@@ -76,6 +79,8 @@
int use_p2pmem;
struct pci_dev *p2p_dev;
+ int pi_type;
+ int metadata_size;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -141,6 +146,7 @@
bool enabled;
int inline_data_size;
const struct nvmet_fabrics_ops *tr_ops;
+ bool pi_enable;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -158,10 +164,9 @@
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
- struct nvmet_cq **cqs;
struct nvmet_sq **sqs;
- bool cmd_seen;
+ bool reset_tbkas;
struct mutex lock;
u64 cap;
@@ -200,6 +205,12 @@
spinlock_t error_lock;
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
+ bool pi_support;
+};
+
+struct nvmet_subsys_model {
+ struct rcu_head rcuhead;
+ char number[];
};
struct nvmet_subsys {
@@ -208,9 +219,11 @@
struct mutex lock;
struct kref ref;
- struct list_head namespaces;
+ struct xarray namespaces;
unsigned int nr_namespaces;
unsigned int max_nsid;
+ u16 cntlid_min;
+ u16 cntlid_max;
struct list_head ctrls;
@@ -222,11 +235,20 @@
u64 ver;
u64 serial;
char *subsysnqn;
+ bool pi_support;
struct config_group group;
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+
+ struct nvmet_subsys_model __rcu *model;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ struct nvme_ctrl *passthru_ctrl;
+ char *passthru_ctrl_path;
+ struct config_group passthru_group;
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -270,7 +292,9 @@
struct module *owner;
unsigned int type;
unsigned int msdbd;
- bool has_keyed_sgls : 1;
+ unsigned int flags;
+#define NVMF_KEYED_SGLS (1 << 0)
+#define NVMF_METADATA_SUPPORTED (1 << 1)
void (*queue_response)(struct nvmet_req *req);
int (*add_port)(struct nvmet_port *port);
void (*remove_port)(struct nvmet_port *port);
@@ -279,6 +303,7 @@
struct nvmet_port *port, char *traddr);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
+ u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -291,6 +316,7 @@
struct nvmet_cq *cq;
struct nvmet_ns *ns;
struct scatterlist *sg;
+ struct scatterlist *metadata_sg;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
union {
struct {
@@ -302,12 +328,17 @@
struct bio_vec *bvec;
struct work_struct work;
} f;
+ struct {
+ struct request *rq;
+ struct work_struct work;
+ bool use_workqueue;
+ } p;
};
int sg_cnt;
- /* data length as parsed from the command: */
- size_t data_len;
+ int metadata_sg_cnt;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
+ size_t metadata_len;
struct nvmet_port *port;
@@ -363,6 +394,8 @@
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
@@ -375,11 +408,14 @@
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
-int nvmet_req_alloc_sgl(struct nvmet_req *req);
-void nvmet_req_free_sgl(struct nvmet_req *req);
+int nvmet_req_alloc_sgls(struct nvmet_req *req);
+void nvmet_req_free_sgls(struct nvmet_req *req);
+void nvmet_execute_set_features(struct nvmet_req *req);
+void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
@@ -488,13 +524,67 @@
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_ns_revalidate(struct nvmet_ns *ns);
-static inline u32 nvmet_rw_len(struct nvmet_req *req)
+static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
req->ns->blksize_shift;
}
+static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return 0;
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
+ req->ns->metadata_size;
+}
+
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+ return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+ sizeof(struct nvme_dsm_range);
+}
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+ return subsys->passthru_ctrl;
+}
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
+static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+ return NULL;
+}
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static inline struct nvme_ctrl *
+nvmet_req_passthru_ctrl(struct nvmet_req *req)
+{
+ return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+}
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
@@ -503,4 +593,27 @@
return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
+static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return false;
+ return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+}
+
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
#endif /* _NVMET_H */
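
A rough illustration (editorial sketch, not part of the patch) of the nvmet_lba_to_sect()/nvmet_sect_to_lba() helpers added above, with the endianness conversion omitted: assuming a namespace formatted with 4096-byte blocks (blksize_shift = 12) and 512-byte kernel sectors (SECTOR_SHIFT = 9), one LBA spans 8 sectors, so both helpers reduce to shifts by 3.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned int blksize_shift = 12;	/* assumed 4K-block namespace */
	uint64_t lba = 100;

	/* nvmet_lba_to_sect(): LBA -> 512-byte sector */
	uint64_t sect = lba << (blksize_shift - SECTOR_SHIFT);	/* 800 */

	/* nvmet_sect_to_lba(): 512-byte sector -> LBA */
	uint64_t back = sect >> (blksize_shift - SECTOR_SHIFT);	/* 100 */

	printf("lba %llu -> sector %llu -> lba %llu\n",
	       (unsigned long long)lba, (unsigned long long)sect,
	       (unsigned long long)back);
	return 0;
}
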
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
new file mode 100644
index 0000000..8ee94f0
--- /dev/null
+++ b/drivers/nvme/target/passthru.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target Passthrough command implementation.
+ *
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ * Copyright (c) 2019-2020, Eideticom Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+
+#include "../host/nvme.h"
+#include "nvmet.h"
+
+MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
+
+/*
+ * xarray to maintain one passthru subsystem per nvme controller.
+ */
+static DEFINE_XARRAY(passthru_subsystems);
+
+static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ctrl *id;
+ int max_hw_sectors;
+ int page_shift;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
+ if (status)
+ goto out_free;
+
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /*
+ * The passthru NVMe driver may have a limit on the number of segments
+ * which depends on the host's memory fragmentation. To solve this,
+ * ensure mdts is limited to a page count equal to the number of segments.
+ */
+ max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+ pctrl->max_hw_sectors);
+
+ /*
+ * nvmet_passthru_map_sg is limited to using a single bio, so limit
+ * the mdts based on BIO_MAX_PAGES as well
+ */
+ max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
+ max_hw_sectors);
+
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
+ id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+
+ id->acl = 3;
+ /*
+ * We export the aerl limit of the fabrics controller; update this when
+ * passthru-based aerl support is added.
+ */
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* emulate kas as most PCIe ctrls don't support kas */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
+ /* don't support host memory buffer */
+ id->hmpre = 0;
+ id->hmmin = 0;
+
+ id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
+ id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ /* don't support fused commands */
+ id->fuses = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ /*
+ * When a passthru controller is set up using the nvme-loop transport it will
+ * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
+ * nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
+ * code path with a duplicate ctrl subsysnqn. In order to prevent that we
+ * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
+ */
+ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+
+ /* use fabric id-ctrl values */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ req->port->inline_data_size) / 16);
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /* Support multipath connections with fabrics */
+ id->cmic |= 1 << 1;
+
+ /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
+ id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
+
+out_free:
+ kfree(id);
+ return status;
+}
+
+static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ns *id;
+ int i;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
+ if (status)
+ goto out_free;
+
+ for (i = 0; i < (id->nlbaf + 1); i++)
+ if (id->lbaf[i].ms)
+ memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));
+
+ id->flbas = id->flbas & ~(1 << 4);
+
+ /*
+ * Presently the NVMe-oF target code does not support sending
+ * metadata, so we must disable it here. This should be updated
+ * once the target starts supporting metadata.
+ */
+ id->mc = 0;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+out_free:
+ kfree(id);
+ return status;
+}
+
+static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+ struct request *rq = req->p.rq;
+ u16 status;
+
+ nvme_execute_passthru_rq(rq);
+
+ status = nvme_req(rq)->status;
+ if (status == NVME_SC_SUCCESS &&
+ req->cmd->common.opcode == nvme_admin_identify) {
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ nvmet_passthru_override_id_ctrl(req);
+ break;
+ case NVME_ID_CNS_NS:
+ nvmet_passthru_override_id_ns(req);
+ break;
+ }
+ }
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, status);
+ blk_mq_free_request(rq);
+}
+
+static void nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
+{
+ struct nvmet_req *req = rq->end_io_data;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, nvme_req(rq)->status);
+ blk_mq_free_request(rq);
+}
+
+static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+{
+ struct scatterlist *sg;
+ int op_flags = 0;
+ struct bio *bio;
+ int i, ret;
+
+ if (req->sg_cnt > BIO_MAX_PAGES)
+ return -EINVAL;
+
+ if (req->cmd->common.opcode == nvme_cmd_flush)
+ op_flags = REQ_FUA;
+ else if (nvme_is_write(req->cmd))
+ op_flags = REQ_SYNC | REQ_IDLE;
+
+ bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+ bio->bi_end_io = bio_put;
+ bio->bi_opf = req_op(rq) | op_flags;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
+ sg->offset) < sg->length) {
+ bio_put(bio);
+ return -EINVAL;
+ }
+ }
+
+ ret = blk_rq_append_bio(rq, &bio);
+ if (unlikely(ret)) {
+ bio_put(bio);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct request_queue *q = ctrl->admin_q;
+ struct nvme_ns *ns = NULL;
+ struct request *rq = NULL;
+ u32 effects;
+ u16 status;
+ int ret;
+
+ if (likely(req->sq->qid != 0)) {
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+
+ ns = nvme_find_get_ns(ctrl, nsid);
+ if (unlikely(!ns)) {
+ pr_err("failed to get passthru ns nsid:%u\n", nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ q = ns->queue;
+ }
+
+ rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
+ if (IS_ERR(rq)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+ }
+
+ if (req->sg_cnt) {
+ ret = nvmet_passthru_map_sg(req, rq);
+ if (unlikely(ret)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_req;
+ }
+ }
+
+ /*
+ * If there are effects for the command we are about to execute, or an
+ * end_req function, we need to run nvme_execute_passthru_rq()
+ * synchronously in a work item, since the end_req function and
+ * nvme_passthru_end() can't be called from the request done callback,
+ * which typically runs in interrupt context.
+ */
+ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+ if (req->p.use_workqueue || effects) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+ schedule_work(&req->p.work);
+ } else {
+ rq->end_io_data = req;
+ blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
+ nvmet_passthru_req_done);
+ }
+
+ if (ns)
+ nvme_put_ns(ns);
+
+ return;
+
+out_put_req:
+ blk_mq_free_request(rq);
+out_put_ns:
+ if (ns)
+ nvme_put_ns(ns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+/*
+ * We need to emulate set host behaviour to ensure that any requested
+ * behaviour of the target's host matches the requested behaviour
+ * of the device's host and fail otherwise.
+ */
+static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_feat_host_behavior *host;
+ u16 status = NVME_SC_INTERNAL;
+ int ret;
+
+ host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
+ if (!host)
+ goto out_complete_req;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ if (ret)
+ goto out_free_host;
+
+ status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
+ if (status)
+ goto out_free_host;
+
+ if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
+ pr_warn("target host has requested different behaviour from the local host\n");
+ status = NVME_SC_INTERNAL;
+ }
+
+out_free_host:
+ kfree(host);
+out_complete_req:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
+{
+ req->p.use_workqueue = false;
+ req->execute = nvmet_passthru_execute_cmd;
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-SGL flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ case nvme_cmd_resv_report:
+ case nvme_cmd_resv_acquire:
+ case nvme_cmd_resv_release:
+ /*
+ * Reservations cannot be supported properly because the
+ * underlying device has no way of differentiating different
+ * hosts that connect via fabrics. This could potentially be
+ * emulated in the future if regular targets grow support for
+ * this feature.
+ */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return nvmet_setup_passthru_command(req);
+}
+
+/*
+ * Only features that are emulated or specifically allowed in the list are
+ * passed down to the controller. This function implements the allow list for
+ * both get and set features.
+ */
+static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ARBITRATION:
+ case NVME_FEAT_POWER_MGMT:
+ case NVME_FEAT_LBA_RANGE:
+ case NVME_FEAT_TEMP_THRESH:
+ case NVME_FEAT_ERR_RECOVERY:
+ case NVME_FEAT_VOLATILE_WC:
+ case NVME_FEAT_WRITE_ATOMIC:
+ case NVME_FEAT_AUTO_PST:
+ case NVME_FEAT_TIMESTAMP:
+ case NVME_FEAT_HCTM:
+ case NVME_FEAT_NOPSC:
+ case NVME_FEAT_RRL:
+ case NVME_FEAT_PLM_CONFIG:
+ case NVME_FEAT_PLM_WINDOW:
+ case NVME_FEAT_HOST_BEHAVIOR:
+ case NVME_FEAT_SANITIZE:
+ case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
+ return nvmet_setup_passthru_command(req);
+
+ case NVME_FEAT_ASYNC_EVENT:
+ /* There is no support for forwarding ASYNC events */
+ case NVME_FEAT_IRQ_COALESCE:
+ case NVME_FEAT_IRQ_CONFIG:
+ /* The IRQ settings will not apply to the target controller */
+ case NVME_FEAT_HOST_MEM_BUF:
+ /*
+ * Any HMB that's set will not be passed through and will
+ * not work as expected
+ */
+ case NVME_FEAT_SW_PROGRESS:
+ /*
+ * The Pre-Boot Software Load Count doesn't make much
+ * sense for a target to export
+ */
+ case NVME_FEAT_RESV_MASK:
+ case NVME_FEAT_RESV_PERSIST:
+ /* No reservations, see nvmet_parse_passthru_io_cmd() */
+ default:
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-SGL flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+ /*
+ * Passthru all vendor specific commands
+ */
+ if (req->cmd->common.opcode >= nvme_admin_vendor_start)
+ return nvmet_setup_passthru_command(req);
+
+ switch (req->cmd->common.opcode) {
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_keep_alive:
+ /*
+ * Most PCIe ctrls don't support the keep alive cmd, so we route keep
+ * alive to the non-passthru mode. Update this code once PCIe ctrls
+ * with keep alive support become available.
+ */
+ req->execute = nvmet_execute_keep_alive;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_set_features;
+ return NVME_SC_SUCCESS;
+ case NVME_FEAT_HOST_BEHAVIOR:
+ req->execute = nvmet_passthru_set_host_behaviour;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_get_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_get_features;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_identify:
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_CTRL:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ case NVME_ID_CNS_NS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_NS:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ default:
+ return nvmet_setup_passthru_command(req);
+ }
+ case nvme_admin_get_log_page:
+ return nvmet_setup_passthru_command(req);
+ default:
+ /* Reject commands not in the allowlist above */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
+{
+ struct nvme_ctrl *ctrl;
+ struct file *file;
+ int ret = -EINVAL;
+ void *old;
+
+ mutex_lock(&subsys->lock);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ if (subsys->nr_namespaces) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
+ file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_unlock;
+ }
+
+ ctrl = nvme_ctrl_from_file(file);
+ if (!ctrl) {
+ pr_err("failed to open nvme controller %s\n",
+ subsys->passthru_ctrl_path);
+
+ goto out_put_file;
+ }
+
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ subsys, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ goto out_put_file;
+ }
+
+ if (old)
+ goto out_put_file;
+
+ subsys->passthru_ctrl = ctrl;
+ subsys->ver = ctrl->vs;
+
+ if (subsys->ver < NVME_VS(1, 2, 1)) {
+ pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
+ NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ subsys->ver = NVME_VS(1, 2, 1);
+ }
+ nvme_get_ctrl(ctrl);
+ __module_get(subsys->passthru_ctrl->ops->module);
+ ret = 0;
+
+out_put_file:
+ filp_close(file, NULL);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ if (subsys->passthru_ctrl) {
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ module_put(subsys->passthru_ctrl->ops->module);
+ nvme_put_ctrl(subsys->passthru_ctrl);
+ }
+ subsys->passthru_ctrl = NULL;
+ subsys->ver = NVMET_DEFAULT_VS;
+}
+
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+ kfree(subsys->passthru_ctrl_path);
+}
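
To make the mdts clamping in nvmet_passthru_override_id_ctrl() concrete, here is an editorial sketch (not part of the patch) of the arithmetic with assumed example values: a passthru controller limited to 127 segments of one 4 KiB page each, BIO_MAX_PAGES of 256 as on v5.10, and MPSMIN of 0 (4 KiB minimum page size).

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)		/* floor(log2(v)), v assumed non-zero */
		r++;
	return r;
}

int main(void)
{
	unsigned int max_segments = 127;	/* assumed passthru ctrl limit */
	unsigned int bio_max_pages = 256;	/* BIO_MAX_PAGES on v5.10 */
	unsigned int page_shift = 0 + 12;	/* NVME_CAP_MPSMIN(cap) + 12 */
	unsigned int max_hw_sectors, mdts;

	/* one 4 KiB page per segment, expressed in 512-byte sectors */
	max_hw_sectors = max_segments << (12 - 9);		/* 1016 */
	/* a single bio is used, so clamp to BIO_MAX_PAGES as well */
	if ((bio_max_pages << (12 - 9)) < max_hw_sectors)
		max_hw_sectors = bio_max_pages << (12 - 9);

	/* mdts is a power of two in units of the minimum page size */
	mdts = ilog2_u32(max_hw_sectors) + 9 - page_shift;	/* 9 + 9 - 12 = 6 */

	printf("mdts = %u (max transfer %u KiB)\n",
	       mdts, (1u << mdts) * 4);				/* 256 KiB */
	return 0;
}
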
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 50e2007..6d5552f 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -20,6 +20,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
+#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvmet.h"
@@ -31,6 +32,12 @@
#define NVMET_RDMA_MAX_INLINE_SGE 4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
+/* Assume mpsmin == device_page_size == 4KB */
+#define NVMET_RDMA_MAX_MDTS 8
+#define NVMET_RDMA_MAX_METADATA_MDTS 5
+
+struct nvmet_rdma_srq;
+
struct nvmet_rdma_cmd {
struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
@@ -38,6 +45,7 @@
struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_srq *nsrq;
};
enum {
@@ -54,6 +62,7 @@
struct nvmet_rdma_queue *queue;
struct ib_cqe read_cqe;
+ struct ib_cqe write_cqe;
struct rdma_rw_ctx rw;
struct nvmet_req req;
@@ -80,6 +89,7 @@
struct ib_cq *cq;
atomic_t sq_wr_avail;
struct nvmet_rdma_device *dev;
+ struct nvmet_rdma_srq *nsrq;
spinlock_t state_lock;
enum nvmet_rdma_queue_state state;
struct nvmet_cq nvme_cq;
@@ -97,17 +107,31 @@
int idx;
int host_qid;
+ int comp_vector;
int recv_queue_size;
int send_queue_size;
struct list_head queue_list;
};
+struct nvmet_rdma_port {
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ struct rdma_cm_id *cm_id;
+ struct delayed_work repair_work;
+};
+
+struct nvmet_rdma_srq {
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *cmds;
+ struct nvmet_rdma_device *ndev;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_srq *srq;
- struct nvmet_rdma_cmd *srq_cmds;
+ struct nvmet_rdma_srq **srqs;
+ int srq_count;
size_t srq_size;
struct kref ref;
struct list_head entry;
@@ -119,6 +143,16 @@
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+static int srq_size_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops srq_size_ops = {
+ .set = srq_size_set,
+ .get = param_get_int,
+};
+
+static int nvmet_rdma_srq_size = 1024;
+module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
+MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");
+
static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
@@ -130,6 +164,7 @@
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
@@ -139,17 +174,22 @@
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+static int srq_size_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 256)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
static int num_pages(int len)
{
return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}
-/* XXX: really should move to a generic header sooner or later.. */
-static inline u32 get_unaligned_le24(const u8 *p)
-{
- return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
-}
-
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
return nvme_is_write(rsp->req.cmd) &&
@@ -374,7 +414,8 @@
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- r->req.p2p_client = &ndev->device->dev;
+ if (!ib_uses_virt_dma(ndev->device))
+ r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
@@ -387,6 +428,9 @@
/* Data In / RDMA READ */
r->read_cqe.done = nvmet_rdma_read_data_done;
+ /* Data Out / RDMA WRITE */
+ r->write_cqe.done = nvmet_rdma_write_data_done;
+
return 0;
out_free_rsp:
@@ -462,8 +506,8 @@
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
- if (ndev->srq)
- ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ if (cmd->nsrq)
+ ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
else
ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
@@ -496,6 +540,129 @@
spin_unlock(&queue->rsp_wr_wait_lock);
}
+static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+ u16 status = 0;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ return NVME_SC_INVALID_PI;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+
+ return status;
+}
+
+static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 control = le16_to_cpu(cmd->rw.control);
+ u8 pi_type = req->ns->pi_type;
+ struct blk_integrity *bi;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no wire domain */
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ /* PI is added by the HW */
+ req->transfer_len += req->metadata_len;
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+ if (control & NVME_RW_PRINFO_PRCHK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (control & NVME_RW_PRINFO_PRCHK_APP)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+}
+
+static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+ int ret;
+
+ if (req->metadata_len)
+ ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
+ addr, key, nvmet_data_dir(req));
+ else
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, 0, addr, key,
+ nvmet_data_dir(req));
+
+ return ret;
+}
+
+static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+
+ if (req->metadata_len)
+ rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt,
+ nvmet_data_dir(req));
+ else
+ rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, nvmet_data_dir(req));
+}
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
@@ -503,14 +670,11 @@
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
- if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
- }
+ if (rsp->n_rdma)
+ nvmet_rdma_rw_ctx_destroy(rsp);
if (rsp->req.sg != rsp->cmd->inline_sg)
- nvmet_req_free_sgl(&rsp->req);
+ nvmet_req_free_sgls(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -536,7 +700,7 @@
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
nvmet_rdma_release_rsp(rsp);
@@ -562,11 +726,16 @@
rsp->send_wr.opcode = IB_WR_SEND;
}
- if (nvmet_rdma_need_data_out(rsp))
- first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
- cm_id->port_num, NULL, &rsp->send_wr);
- else
+ if (nvmet_rdma_need_data_out(rsp)) {
+ if (rsp->req.metadata_len)
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, &rsp->write_cqe, NULL);
+ else
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ } else {
first_wr = &rsp->send_wr;
+ }
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
@@ -584,16 +753,15 @@
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ u16 status = 0;
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
nvmet_req_uninit(&rsp->req);
nvmet_rdma_release_rsp(rsp);
if (wc->status != IB_WC_WR_FLUSH_ERR) {
@@ -604,7 +772,57 @@
return;
}
- nvmet_req_execute(&rsp->req);
+ if (rsp->req.metadata_len)
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(status))
+ nvmet_req_complete(&rsp->req, status);
+ else
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u16 status;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
+ nvmet_req_uninit(&rsp->req);
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
+ ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ /*
+ * Upon RDMA completion check the signature status
+ * - if succeeded send good NVMe response
+ * - if failed send bad NVMe response with appropriate error
+ */
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ if (unlikely(status))
+ rsp->req.cqe->status = cpu_to_le16(status << 1);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -661,9 +879,9 @@
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
- struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
u32 key = get_unaligned_le32(sgl->key);
+ struct ib_sig_attrs sig_attrs;
int ret;
rsp->req.transfer_len = get_unaligned_le24(sgl->length);
@@ -672,14 +890,15 @@
if (!rsp->req.transfer_len)
return 0;
- ret = nvmet_req_alloc_sgl(&rsp->req);
- if (ret < 0)
+ if (rsp->req.metadata_len)
+ nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
+
+ ret = nvmet_req_alloc_sgls(&rsp->req);
+ if (unlikely(ret < 0))
goto error_out;
- ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
- rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
- nvmet_data_dir(&rsp->req));
- if (ret < 0)
+ ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
+ if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -747,7 +966,7 @@
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- nvmet_req_execute(&rsp->req);
+ rsp->req.execute(&rsp->req);
}
return true;
@@ -789,7 +1008,7 @@
{
struct nvmet_rdma_cmd *cmd =
container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
struct nvmet_rdma_rsp *rsp;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -841,23 +1060,40 @@
nvmet_rdma_handle_command(queue, rsp);
}
-static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
- if (!ndev->srq)
- return;
+ nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
+ false);
+ ib_destroy_srq(nsrq->srq);
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
- ib_destroy_srq(ndev->srq);
+ kfree(nsrq);
}
-static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i;
+
+ if (!ndev->srqs)
+ return;
+
+ for (i = 0; i < ndev->srq_count; i++)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+
+ kfree(ndev->srqs);
+}
+
+static struct nvmet_rdma_srq *
+nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
struct ib_srq_init_attr srq_attr = { NULL, };
+ size_t srq_size = ndev->srq_size;
+ struct nvmet_rdma_srq *nsrq;
struct ib_srq *srq;
- size_t srq_size;
int ret, i;
- srq_size = 4095; /* XXX: tune */
+ nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
+ if (!nsrq)
+ return ERR_PTR(-ENOMEM);
srq_attr.attr.max_wr = srq_size;
srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
@@ -865,6 +1101,42 @@
srq_attr.srq_type = IB_SRQT_BASIC;
srq = ib_create_srq(ndev->pd, &srq_attr);
if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto out_free;
+ }
+
+ nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(nsrq->cmds)) {
+ ret = PTR_ERR(nsrq->cmds);
+ goto out_destroy_srq;
+ }
+
+ nsrq->srq = srq;
+ nsrq->ndev = ndev;
+
+ for (i = 0; i < srq_size; i++) {
+ nsrq->cmds[i].nsrq = nsrq;
+ ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
+ if (ret)
+ goto out_free_cmds;
+ }
+
+ return nsrq;
+
+out_free_cmds:
+ nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
+out_destroy_srq:
+ ib_destroy_srq(srq);
+out_free:
+ kfree(nsrq);
+ return ERR_PTR(ret);
+}
+
+static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i, ret;
+
+ if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
/*
* If SRQs aren't supported we just go ahead and use normal
* non-shared receive queues.
@@ -873,27 +1145,29 @@
return 0;
}
- ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
- if (IS_ERR(ndev->srq_cmds)) {
- ret = PTR_ERR(ndev->srq_cmds);
- goto out_destroy_srq;
- }
+ ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
+ nvmet_rdma_srq_size);
+ ndev->srq_count = min(ndev->device->num_comp_vectors,
+ ndev->device->attrs.max_srq);
- ndev->srq = srq;
- ndev->srq_size = srq_size;
+ ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
+ if (!ndev->srqs)
+ return -ENOMEM;
- for (i = 0; i < srq_size; i++) {
- ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
- if (ret)
- goto out_free_cmds;
+ for (i = 0; i < ndev->srq_count; i++) {
+ ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
+ if (IS_ERR(ndev->srqs[i])) {
+ ret = PTR_ERR(ndev->srqs[i]);
+ goto err_srq;
+ }
}
return 0;
-out_free_cmds:
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
-out_destroy_srq:
- ib_destroy_srq(srq);
+err_srq:
+ while (--i >= 0)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+ kfree(ndev->srqs);
return ret;
}
@@ -906,7 +1180,7 @@
list_del(&ndev->entry);
mutex_unlock(&device_list_mutex);
- nvmet_rdma_destroy_srq(ndev);
+ nvmet_rdma_destroy_srqs(ndev);
ib_dealloc_pd(ndev->pd);
kfree(ndev);
@@ -915,7 +1189,8 @@
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
- struct nvmet_port *port = cm_id->context;
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_port *nport = port->nport;
struct nvmet_rdma_device *ndev;
int inline_page_count;
int inline_sge_count;
@@ -932,18 +1207,26 @@
if (!ndev)
goto out_err;
- inline_page_count = num_pages(port->inline_data_size);
+ inline_page_count = num_pages(nport->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
- port->inline_data_size, cm_id->device->name,
+ nport->inline_data_size, cm_id->device->name,
inline_sge_count * PAGE_SIZE);
- port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ nport->inline_data_size = inline_sge_count * PAGE_SIZE;
inline_page_count = inline_sge_count;
}
- ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+ IB_DEVICE_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -952,7 +1235,7 @@
goto out_free_dev;
if (nvmet_rdma_use_srq) {
- ret = nvmet_rdma_init_srq(ndev);
+ ret = nvmet_rdma_init_srqs(ndev);
if (ret)
goto out_free_pd;
}
@@ -976,23 +1259,15 @@
{
struct ib_qp_init_attr qp_attr;
struct nvmet_rdma_device *ndev = queue->dev;
- int comp_vector, nr_cqe, ret, i;
-
- /*
- * Spread the io queues across completion vectors,
- * but still keep all admin queues on vector 0.
- */
- comp_vector = !queue->host_qid ? 0 :
- queue->idx % ndev->device->num_comp_vectors;
+ int nr_cqe, ret, i, factor;
/*
* Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
*/
nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
- queue->cq = ib_alloc_cq(ndev->device, queue,
- nr_cqe + 1, comp_vector,
- IB_POLL_WORKQUEUE);
+ queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+ queue->comp_vector, IB_POLL_WORKQUEUE);
if (IS_ERR(queue->cq)) {
ret = PTR_ERR(queue->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
@@ -1009,18 +1284,23 @@
qp_attr.qp_type = IB_QPT_RC;
/* +1 for drain */
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
- qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+ factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
+ 1 << NVMET_RDMA_MAX_MDTS);
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
ndev->device->attrs.max_send_sge);
- if (ndev->srq) {
- qp_attr.srq = ndev->srq;
+ if (queue->nsrq) {
+ qp_attr.srq = queue->nsrq->srq;
} else {
/* +1 for drain */
qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
}
+ if (queue->port->pi_enable && queue->host_qid)
+ qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+
ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
if (ret) {
pr_err("failed to create_qp ret= %d\n", ret);
@@ -1034,7 +1314,7 @@
__func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
qp_attr.cap.max_send_wr, queue->cm_id);
- if (!ndev->srq) {
+ if (!queue->nsrq) {
for (i = 0; i < queue->recv_queue_size; i++) {
queue->cmds[i].queue = queue;
ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
@@ -1049,7 +1329,7 @@
err_destroy_qp:
rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
- ib_free_cq(queue->cq);
+ ib_cq_pool_put(queue->cq, nr_cqe + 1);
goto out;
}
@@ -1059,7 +1339,8 @@
if (queue->cm_id)
rdma_destroy_id(queue->cm_id);
ib_destroy_qp(queue->qp);
- ib_free_cq(queue->cq);
+ ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+ queue->send_queue_size + 1);
}
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
@@ -1069,7 +1350,7 @@
nvmet_sq_destroy(&queue->nvme_sq);
nvmet_rdma_destroy_queue_ib(queue);
- if (!queue->dev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
@@ -1131,7 +1412,8 @@
rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
rej.sts = cpu_to_le16(status);
- return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
}
static struct nvmet_rdma_queue *
@@ -1139,6 +1421,7 @@
struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
+ struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_queue *queue;
int ret;
@@ -1165,6 +1448,7 @@
INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
queue->dev = ndev;
queue->cm_id = cm_id;
+ queue->port = port->nport;
spin_lock_init(&queue->state_lock);
queue->state = NVMET_RDMA_Q_CONNECTING;
@@ -1181,13 +1465,23 @@
goto out_destroy_sq;
}
+ /*
+ * Spread the io queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ queue->comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+
+
ret = nvmet_rdma_alloc_rsps(queue);
if (ret) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_ida_remove;
}
- if (!ndev->srq) {
+ if (ndev->srqs) {
+ queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
+ } else {
queue->cmds = nvmet_rdma_alloc_cmds(ndev,
queue->recv_queue_size,
!queue->host_qid);
@@ -1208,7 +1502,7 @@
return queue;
out_free_cmds:
- if (!ndev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
@@ -1234,6 +1528,10 @@
case IB_EVENT_COMM_EST:
rdma_notify(queue->cm_id, event->event);
break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ pr_debug("received last WQE reached event for queue=0x%p\n",
+ queue);
+ break;
default:
pr_err("received IB QP event: %s (%d)\n",
ib_event_msg(event->event), event->event);
@@ -1283,7 +1581,6 @@
ret = -ENOMEM;
goto put_device;
}
- queue->port = cm_id->context;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
@@ -1423,7 +1720,7 @@
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
{
- struct nvmet_port *port;
+ struct nvmet_rdma_port *port;
if (queue) {
/*
@@ -1442,7 +1739,7 @@
* cm_id destroy. use atomic xchg to make sure
* we don't compete with remove_port.
*/
- if (xchg(&port->priv, NULL) != cm_id)
+ if (xchg(&port->cm_id, NULL) != cm_id)
return 0;
/*
@@ -1473,6 +1770,13 @@
nvmet_rdma_queue_established(queue);
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+ schedule_delayed_work(&port->repair_work, 0);
+ break;
+ }
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1483,7 +1787,7 @@
case RDMA_CM_EVENT_REJECTED:
pr_debug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
@@ -1515,43 +1819,44 @@
mutex_unlock(&nvmet_rdma_queue_mutex);
}
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
+
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
+{
+ struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
+
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+
+ /*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet. Doing this only after the RDMA-CM ID has been
+	 * destroyed guarantees that no new queue will be created.
+ */
+ nvmet_rdma_destroy_port_queues(port);
+}
+
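nvmet_rdma_disable_port() uses the same xchg() idiom as the device-removal path above: whichever context swaps port->cm_id to NULL first is the only one that destroys it, so a concurrent remove_port and ADDR_CHANGE repair cannot tear the listener down twice. A userspace sketch of that single-winner pattern with C11 atomics (illustrative, not the kernel helpers):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct listener { int dummy; };

static _Atomic(struct listener *) cm_id;

/* Only the caller that observes a non-NULL old value tears the listener down. */
static void disable_port(void)
{
	struct listener *old = atomic_exchange(&cm_id, NULL);

	if (old) {
		printf("destroying listener %p\n", (void *)old);
		free(old);
	} else {
		printf("already disabled, nothing to do\n");
	}
}

int main(void)
{
	atomic_store(&cm_id, (struct listener *)malloc(sizeof(struct listener)));
	disable_port();		/* destroys the listener */
	disable_port();		/* safe no-op */
	return 0;
}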
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+ struct sockaddr *addr = (struct sockaddr *)&port->addr;
struct rdma_cm_id *cm_id;
- struct sockaddr_storage addr = { };
- __kernel_sa_family_t af;
int ret;
- switch (port->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- af = AF_INET;
- break;
- case NVMF_ADDR_FAMILY_IP6:
- af = AF_INET6;
- break;
- default:
- pr_err("address family %d not supported\n",
- port->disc_addr.adrfam);
- return -EINVAL;
- }
-
- if (port->inline_data_size < 0) {
- port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
- } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
- pr_warn("inline_data_size %u is too large, reducing to %u\n",
- port->inline_data_size,
- NVMET_RDMA_MAX_INLINE_DATA_SIZE);
- port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
- }
-
- ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
- port->disc_addr.trsvcid, &addr);
- if (ret) {
- pr_err("malformed ip/port passed: %s:%s\n",
- port->disc_addr.traddr, port->disc_addr.trsvcid);
- return ret;
- }
-
cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
@@ -1569,23 +1874,19 @@
goto out_destroy_id;
}
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+ ret = rdma_bind_addr(cm_id, addr);
if (ret) {
- pr_err("binding CM ID to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
- pr_err("listening to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
- pr_info("enabling port %d (%pISpcs)\n",
- le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
- port->priv = cm_id;
+ port->cm_id = cm_id;
return 0;
out_destroy_id:
@@ -1593,18 +1894,92 @@
return ret;
}
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
- struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+ struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+ struct nvmet_rdma_port, repair_work);
+ int ret;
- if (cm_id)
- rdma_destroy_id(cm_id);
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ schedule_delayed_work(&port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ nport->priv = port;
+ port->nport = nport;
+ INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto out_free_port;
+ }
+
+ if (nport->inline_data_size < 0) {
+ nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ nport->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto out_free_port;
+ }
+
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ goto out_free_port;
+
+ pr_info("enabling port %d (%pISpcs)\n",
+ le16_to_cpu(nport->disc_addr.portid),
+ (struct sockaddr *)&port->addr);
+
+ return 0;
+
+out_free_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+
+ cancel_delayed_work_sync(&port->repair_work);
+ nvmet_rdma_disable_port(port);
+ kfree(port);
}
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
- struct nvmet_port *port, char *traddr)
+ struct nvmet_port *nport, char *traddr)
{
- struct rdma_cm_id *cm_id = port->priv;
+ struct nvmet_rdma_port *port = nport->priv;
+ struct rdma_cm_id *cm_id = port->cm_id;
if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
@@ -1614,20 +1989,28 @@
sprintf(traddr, "%pISc", addr);
} else {
- memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
}
+static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
+{
+ if (ctrl->pi_support)
+ return NVMET_RDMA_MAX_METADATA_MDTS;
+ return NVMET_RDMA_MAX_MDTS;
+}
+
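MDTS is a power-of-two multiple of the controller's minimum memory page size (CAP.MPSMIN, commonly 4 KiB), so these constants bound the per-command transfer size indirectly: assuming 4 KiB pages and the usual values of 8 and 5 for the two limits, that is 1 MiB without protection information and 128 KiB with it. A quick arithmetic sketch (page size and constant values are assumptions):

#include <stdio.h>

/* MDTS -> bytes: (1 << mdts) units of the minimum memory page size. */
static unsigned long mdts_to_bytes(unsigned int mdts, unsigned long min_page_size)
{
	return (1UL << mdts) * min_page_size;
}

int main(void)
{
	unsigned long mpsmin = 4096;	/* assumed CAP.MPSMIN page size */

	/* Assumed constant values: MAX_MDTS = 8, MAX_METADATA_MDTS = 5. */
	printf("pi off: %lu KiB per command\n", mdts_to_bytes(8, mpsmin) / 1024);
	printf("pi on : %lu KiB per command\n", mdts_to_bytes(5, mpsmin) / 1024);
	return 0;
}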
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
.msdbd = 1,
- .has_keyed_sgls = 1,
+ .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
.add_port = nvmet_rdma_add_port,
.remove_port = nvmet_rdma_remove_port,
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
+ .get_mdts = nvmet_rdma_get_mdts,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2ae8462..96b67a7 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -19,6 +19,16 @@
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique to some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
+
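In the kernel, sock_set_priority() writes this value into sk->sk_priority, the same field a userspace application sets with the SO_PRIORITY socket option; since the parameter is created with 0644 permissions it should also be adjustable after load under /sys/module/nvmet_tcp/parameters/so_priority. A minimal userspace illustration of the equivalent knob (the priority value is hypothetical):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int prio = 6;			/* hypothetical priority value */

	/* Userspace equivalent of sock_set_priority(sk, so_priority). */
	if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
		perror("setsockopt(SO_PRIORITY)");

	close(fd);
	return 0;
}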
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -84,7 +94,6 @@
struct socket *sock;
struct nvmet_tcp_port *port;
struct work_struct io_work;
- int cpu;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
@@ -134,7 +143,6 @@
struct work_struct accept_work;
struct nvmet_port *nport;
struct sockaddr_storage addr;
- int last_cpu;
void (*data_ready)(struct sock *);
};
@@ -143,7 +151,7 @@
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
-static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
@@ -209,6 +217,11 @@
list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+ return queue->sock->sk->sk_incoming_cpu;
+}
+
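queue_cpu() replaces the old per-port round-robin CPU assignment: io_work is now queued on the CPU that last handled receive processing for this socket (sk->sk_incoming_cpu), so the work tends to run close to where the data arrived. The same signal is visible from userspace through the SO_INCOMING_CPU option on reasonably recent Linux/glibc, as in this sketch (a fresh, idle socket typically reports -1):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int cpu = -1;
	socklen_t len = sizeof(cpu);

	/* Which CPU last handled RX for this socket; -1 until traffic arrives. */
	if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len))
		perror("getsockopt(SO_INCOMING_CPU)");
	else
		printf("sk_incoming_cpu = %d\n", cpu);

	close(fd);
	return 0;
}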
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -321,12 +334,20 @@
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}
+static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+{
+ if (status == -EPIPE || status == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+}
+
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
u32 len = le32_to_cpu(sgl->length);
- if (!cmd->req.data_len)
+ if (!len)
return 0;
if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
@@ -358,7 +379,7 @@
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -366,6 +387,23 @@
crypto_ahash_digest(hash);
}
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist sg;
+ struct kvec *iov;
+ int i;
+
+ crypto_ahash_init(hash);
+ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+ sg_init_one(&sg, iov->iov_base, iov->iov_len);
+ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+ crypto_ahash_update(hash);
+ }
+ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+ crypto_ahash_final(hash);
+}
+
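nvmet_tcp_recv_ddgst() computes the data digest incrementally, one mapped iovec at a time, rather than over one contiguous buffer. That is valid because the NVMe/TCP data digest is a CRC32C and chaining the un-finalized CRC state across chunks gives the same result as a single pass over the concatenated data; the kernel uses the crypto API's crc32c for this, while the self-contained userspace demonstration below uses a bitwise implementation purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC32C (Castagnoli), bit-by-bit; 'crc' carries unfinalized state. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}

int main(void)
{
	const char *payload = "nvme/tcp data digest example";
	size_t len = strlen(payload);
	size_t split = len / 2;		/* pretend the data arrived in two iovecs */

	/* One pass over the whole buffer... */
	uint32_t whole = ~crc32c_update(~0u, (const uint8_t *)payload, len);

	/* ...equals init/update/update/final over the chunks. */
	uint32_t state = ~0u;
	state = crc32c_update(state, (const uint8_t *)payload, split);
	state = crc32c_update(state, (const uint8_t *)payload + split, len - split);
	uint32_t chunked = ~state;

	assert(whole == chunked);
	printf("ddgst = 0x%08x\n", chunked);
	return 0;
}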
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -390,7 +428,7 @@
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -447,17 +485,11 @@
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
struct llist_node *node;
+ struct nvmet_tcp_cmd *cmd;
- node = llist_del_all(&queue->resp_list);
- if (!node)
- return;
-
- while (node) {
- struct nvmet_tcp_cmd *cmd = llist_entry(node,
- struct nvmet_tcp_cmd, lentry);
-
+ for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
+ cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
list_add(&cmd->entry, &queue->resp_send_list);
- node = node->next;
queue->send_list_len++;
}
}
@@ -493,9 +525,34 @@
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ struct nvme_sgl_desc *sgl;
+ u32 len;
+
+ if (unlikely(cmd == queue->cmd)) {
+ sgl = &cmd->req.cmd->common.dptr.sgl;
+ len = le32_to_cpu(sgl->length);
+
+ /*
+ * Wait for inline data before processing the response.
+		 * Avoid using helpers; this might happen before
+		 * nvmet_req_init() has completed.
+ */
+ if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ len && len <= cmd->req.port->inline_data_size &&
+ nvme_is_write(cmd->req.cmd))
+ return;
+ }
llist_add(&cmd->lentry, &queue->resp_list);
- queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+}
+
+static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+ nvmet_tcp_queue_response(&cmd->req);
+ else
+ cmd->req.execute(&cmd->req);
}
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -506,7 +563,7 @@
ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
offset_in_page(cmd->data_pdu) + cmd->offset,
- left, MSG_DONTWAIT | MSG_MORE);
+ left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
if (ret <= 0)
return ret;
@@ -534,7 +591,7 @@
if ((!last_in_batch && cmd->queue->send_list_len) ||
cmd->wbytes_done + left < cmd->req.transfer_len ||
queue->data_digest || !queue->nvme_sq.sqhd_disabled)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
left, flags);
@@ -581,7 +638,7 @@
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -610,7 +667,7 @@
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -628,21 +685,31 @@
return 1;
}
-static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &cmd->exp_ddgst + cmd->offset,
- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+ .iov_len = left
};
int ret;
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
if (queue->nvme_sq.sqhd_disabled) {
cmd->queue->snd_cmd = NULL;
@@ -678,7 +745,7 @@
}
if (cmd->state == NVMET_TCP_SEND_DDGST) {
- ret = nvmet_try_send_ddgst(cmd);
+ ret = nvmet_try_send_ddgst(cmd, last_in_batch);
if (ret <= 0)
goto done_send;
}
@@ -709,11 +776,15 @@
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*sends)++;
}
-
+done:
return ret;
}
@@ -825,13 +896,11 @@
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
+ size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- /* recover the expected data transfer length */
- req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
-
if (!nvme_is_write(cmd->req.cmd) ||
- req->data_len > cmd->req.port->inline_data_size) {
+ data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
}
@@ -922,7 +991,7 @@
le32_to_cpu(req->cmd->common.dptr.sgl.length));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
- return -EAGAIN;
+ return 0;
}
ret = nvmet_tcp_map_data(queue->cmd);
@@ -947,7 +1016,7 @@
goto out;
}
- nvmet_req_execute(&queue->cmd->req);
+ queue->cmd->req.execute(&queue->cmd->req);
out:
nvmet_prepare_receive_pdu(queue);
return ret;
@@ -1020,7 +1089,7 @@
}
if (queue->hdr_digest &&
- nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
@@ -1038,7 +1107,7 @@
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1060,16 +1129,14 @@
}
nvmet_tcp_unmap_pdu_iovec(cmd);
-
- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
- cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
- nvmet_req_execute(&cmd->req);
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
}
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
nvmet_prepare_receive_pdu(queue);
return 0;
}
@@ -1105,9 +1172,9 @@
goto out;
}
- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
- cmd->rbytes_done == cmd->req.transfer_len)
- nvmet_req_execute(&cmd->req);
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
@@ -1155,11 +1222,15 @@
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*recvs)++;
}
-
+done:
return ret;
}
@@ -1184,27 +1255,16 @@
pending = false;
ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
- if (ret > 0) {
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
- if (ret > 0) {
- /* transmitted message/data */
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
@@ -1212,7 +1272,7 @@
* We exhausted our budget, requeue ourselves
*/
if (pending)
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
@@ -1343,6 +1403,7 @@
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
+ struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1362,6 +1423,8 @@
nvmet_tcp_free_crypto(queue);
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
kfree(queue);
}
@@ -1372,7 +1435,7 @@
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1392,7 +1455,7 @@
if (sk_stream_is_writeable(sk)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -1426,7 +1489,6 @@
{
struct socket *sock = queue->sock;
struct inet_sock *inet = inet_sk(sock->sk);
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
int ret;
ret = kernel_getsockname(sock,
@@ -1444,32 +1506,36 @@
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret)
- return ret;
+ sock_no_linger(sock->sk);
+
+ if (so_priority > 0)
+ sock_set_priority(sock->sk, so_priority);
/* Set socket type of service */
- if (inet->rcv_tos > 0) {
- int tos = inet->rcv_tos;
+ if (inet->rcv_tos > 0)
+ ip_sock_set_tos(sock->sk, inet->rcv_tos);
- ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
- (char *)&tos, sizeof(tos));
- if (ret)
- return ret;
- }
-
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
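sock_no_linger() is the in-kernel replacement for the kernel_setsockopt(SO_LINGER) call removed above: it enables linger with a zero timeout, so close() resets the connection and discards any unsent data, which is what the preceding comment is guarding against. The userspace equivalent looks roughly like this (sketch only):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct linger sol = { .l_onoff = 1, .l_linger = 0 };

	/* Zero-timeout linger: close() aborts the connection instead of
	 * leaving queued data to be retransmitted later.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &sol, sizeof(sol)))
		perror("setsockopt(SO_LINGER)");

	close(fd);
	return 0;
}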
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1507,9 +1573,6 @@
if (ret)
goto out_free_connect;
- port->last_cpu = cpumask_next_wrap(port->last_cpu,
- cpu_online_mask, -1, false);
- queue->cpu = port->last_cpu;
nvmet_prepare_receive_pdu(queue);
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1520,8 +1583,6 @@
if (ret)
goto out_destroy_sq;
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1578,7 +1639,7 @@
{
struct nvmet_tcp_port *port;
__kernel_sa_family_t af;
- int opt, ret;
+ int ret;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -1607,7 +1668,6 @@
}
port->nport = nport;
- port->last_cpu = -1;
INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
if (port->nport->inline_data_size < 0)
port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
@@ -1622,21 +1682,10 @@
port->sock->sk->sk_user_data = port;
port->data_ready = port->sock->sk->sk_data_ready;
port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
-
- opt = 1;
- ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
-
- ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
- goto err_sock;
- }
+ sock_set_reuseaddr(port->sock->sk);
+ tcp_sock_set_nodelay(port->sock->sk);
+ if (so_priority > 0)
+ sock_set_priority(port->sock->sk, so_priority);
ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
sizeof(port->addr));
@@ -1664,6 +1713,17 @@
return ret;
}
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1673,6 +1733,11 @@
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
+ /*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);
@@ -1721,11 +1786,10 @@
}
}
-static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
- .has_keyed_sgls = 0,
.add_port = nvmet_tcp_add_port,
.remove_port = nvmet_tcp_remove_port,
.queue_response = nvmet_tcp_queue_response,
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 3f61b66..c14e324 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -123,6 +123,34 @@
);
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvmet_async_event,
+ TP_PROTO(struct nvmet_ctrl *ctrl, __le32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->cntlid;
+ __entry->result = (le32_to_cpu(result) & 0xff00) >> 8;
+ ),
+ TP_printk("nvmet%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+#undef aer_name
+
#endif /* _TRACE_NVMET_H */
#undef TRACE_INCLUDE_PATH