Update Linux to v5.4.2
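
The nvme target file backend is reworked in this update:

 - clamp the exported namespace blksize_shift to 12, since i_blkbits
   can exceed the accepted upper bound
 - build one bio_vec per scatterlist element instead of one per page
 - pass ki_flags through nvmet_file_submit_bvec() and attempt buffered
   I/O inline with IOCB_NOWAIT before punting to the workqueue
 - report failures through errno_to_nvme_status() and fill in
   error_slba/error_loc for error log page support
 - tolerate -EOPNOTSUPP from vfs_fallocate() when handling discard
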
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 81a9dc5..05453f5 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,7 +49,12 @@
goto err;
ns->size = stat.size;
- ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+ /*
+ * i_blkbits can be greater than the universally accepted upper bound,
+ * so make sure we export a sane namespace lba_shift.
+ */
+ ns->blksize_shift = min_t(u8,
+ file_inode(ns->file)->i_blkbits, 12);
ns->bvec_cache = kmem_cache_create("nvmet-bvec",
NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
@@ -75,25 +80,24 @@
return ret;
}
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
- bv->bv_page = sg_page_iter_page(iter);
- bv->bv_offset = iter->sg->offset;
- bv->bv_len = PAGE_SIZE - iter->sg->offset;
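+ /* A single bvec now covers the whole SG element, which may span pages. */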
+ bv->bv_page = sg_page(sg);
+ bv->bv_offset = sg->offset;
+ bv->bv_len = sg->length;
}
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
- unsigned long nr_segs, size_t count)
+ unsigned long nr_segs, size_t count, int ki_flags)
{
struct kiocb *iocb = &req->f.iocb;
ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
struct iov_iter iter;
- int ki_flags = 0, rw;
- ssize_t ret;
+ int rw;
if (req->cmd->rw.opcode == nvme_cmd_write) {
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- ki_flags = IOCB_DSYNC;
+ ki_flags |= IOCB_DSYNC;
call_iter = req->ns->file->f_op->write_iter;
rw = WRITE;
} else {
@@ -101,23 +105,19 @@
rw = READ;
}
- iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+ iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
- ret = call_iter(iocb, &iter);
-
- if (ret != -EIOCBQUEUED && iocb->ki_complete)
- iocb->ki_complete(iocb, ret, 0);
-
- return ret;
+ return call_iter(iocb, &iter);
}
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+ u16 status = NVME_SC_SUCCESS;
if (req->f.bvec != req->inline_bvec) {
if (likely(req->f.mpool_alloc == false))
@@ -126,28 +126,114 @@
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- nvmet_req_complete(req, ret != req->data_len ?
- NVME_SC_INTERNAL | NVME_SC_DNR : 0);
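+ /* Anything but a full-length transfer is an error; map it to an NVMe status. */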
+ if (unlikely(ret != req->data_len))
+ status = errno_to_nvme_status(req, ret);
+ nvmet_req_complete(req, status);
}
-static void nvmet_file_execute_rw(struct nvmet_req *req)
+static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
- struct sg_page_iter sg_pg_iter;
+ ssize_t nr_bvec = req->sg_cnt;
unsigned long bv_cnt = 0;
bool is_sync = false;
size_t len = 0, total_len = 0;
ssize_t ret = 0;
loff_t pos;
+ int i;
+ struct scatterlist *sg;
- if (!req->sg_cnt || !nr_bvec) {
- nvmet_req_complete(req, 0);
- return;
- }
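+ /*
+ * The emergency mempool only guarantees NVMET_MAX_MPOOL_BVEC vectors,
+ * so larger requests must be split into chunks and submitted
+ * synchronously.
+ */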
+ if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+ is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
if (unlikely(pos + req->data_len > req->ns->size)) {
- nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return true;
+ }
+
+ memset(&req->f.iocb, 0, sizeof(struct kiocb));
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
+ len += req->f.bvec[bv_cnt].bv_len;
+ total_len += req->f.bvec[bv_cnt].bv_len;
+ bv_cnt++;
+
+ WARN_ON_ONCE((nr_bvec - 1) < 0);
+
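+ /* In sync mode, submit once the chunk is full or on the last element. */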
+ if (unlikely(is_sync) &&
+ (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
+ if (ret < 0)
+ goto complete;
+
+ pos += len;
+ bv_cnt = 0;
+ len = 0;
+ }
+ nr_bvec--;
+ }
+
+ if (WARN_ON_ONCE(total_len != req->data_len)) {
+ ret = -EIO;
+ goto complete;
+ }
+
+ if (unlikely(is_sync)) {
+ ret = total_len;
+ goto complete;
+ }
+
+ /*
+ * A NULL ki_complete asks for synchronous execution, which we want
+ * for the IOCB_NOWAIT case.
+ */
+ if (!(ki_flags & IOCB_NOWAIT))
+ req->f.iocb.ki_complete = nvmet_file_io_done;
+
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
+
+ switch (ret) {
+ case -EIOCBQUEUED:
+ return true;
+ case -EAGAIN:
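+ /*
+ * With IOCB_NOWAIT the I/O could not proceed without blocking;
+ * return false so the caller retries via the buffered-io workqueue.
+ */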
+ if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
+ goto complete;
+ return false;
+ case -EOPNOTSUPP:
+ /*
+ * Some file systems return -EOPNOTSUPP for IOCB_NOWAIT. Handle that
+ * error case separately here and retry without IOCB_NOWAIT.
+ */
+ if ((ki_flags & IOCB_NOWAIT))
+ return false;
+ break;
+ }
+
+complete:
+ nvmet_file_io_done(&req->f.iocb, ret, 0);
+ return true;
+}
+
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+ nvmet_file_execute_io(req, 0);
+}
+
+static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
+{
+ INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+ queue_work(buffered_io_wq, &req->f.work);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+ ssize_t nr_bvec = req->sg_cnt;
+
+ if (!req->sg_cnt || !nr_bvec) {
+ nvmet_req_complete(req, 0);
return;
}
@@ -157,65 +243,25 @@
else
req->f.bvec = req->inline_bvec;
- req->f.mpool_alloc = false;
if (unlikely(!req->f.bvec)) {
/* fallback under memory pressure */
req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
req->f.mpool_alloc = true;
- if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
- is_sync = true;
- }
+ } else
+ req->f.mpool_alloc = false;
- memset(&req->f.iocb, 0, sizeof(struct kiocb));
- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
- len += req->f.bvec[bv_cnt].bv_len;
- total_len += req->f.bvec[bv_cnt].bv_len;
- bv_cnt++;
-
- WARN_ON_ONCE((nr_bvec - 1) < 0);
-
- if (unlikely(is_sync) &&
- (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
- ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
- if (ret < 0)
- goto out;
- pos += len;
- bv_cnt = 0;
- len = 0;
- }
- nr_bvec--;
- }
-
- if (WARN_ON_ONCE(total_len != req->data_len))
- ret = -EIO;
-out:
- if (unlikely(is_sync || ret)) {
- nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
- return;
- }
- req->f.iocb.ki_complete = nvmet_file_io_done;
- nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
-}
-
-static void nvmet_file_buffered_io_work(struct work_struct *w)
-{
- struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
-
- nvmet_file_execute_rw(req);
-}
-
-static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
-{
- INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
- queue_work(buffered_io_wq, &req->f.work);
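+ /*
+ * For buffered I/O, first try a non-blocking inline submission and
+ * only punt to the buffered-io workqueue when it cannot complete.
+ */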
+ if (req->ns->buffered_io) {
+ if (likely(!req->f.mpool_alloc) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
+ return;
+ nvmet_file_submit_buffered_io(req);
+ } else
+ nvmet_file_execute_io(req, 0);
}
u16 nvmet_file_flush(struct nvmet_req *req)
{
- if (vfs_fsync(req->ns->file, 1) < 0)
- return NVME_SC_INTERNAL | NVME_SC_DNR;
- return 0;
+ return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}
static void nvmet_file_flush_work(struct work_struct *w)
@@ -236,29 +282,34 @@
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
struct nvme_dsm_range range;
loff_t offset, len;
- u16 ret;
+ u16 status = 0;
+ int ret;
int i;
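+ /* dsm.nr is a 0's based count, hence the inclusive loop bound. */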
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
- ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
sizeof(range));
- if (ret)
+ if (status)
break;
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
- len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
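+ /* Widen nlb to loff_t before shifting to avoid 32-bit overflow. */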
+ len = le32_to_cpu(range.nlb);
+ len <<= req->ns->blksize_shift;
if (offset + len > req->ns->size) {
- ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, -ENOSPC);
break;
}
- if (vfs_fallocate(req->ns->file, mode, offset, len)) {
- ret = NVME_SC_INTERNAL | NVME_SC_DNR;
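+ /*
+ * Discard is advisory; a file system without hole punching
+ * (-EOPNOTSUPP) must not fail the command.
+ */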
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ if (ret && ret != -EOPNOTSUPP) {
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, ret);
break;
}
}
- nvmet_req_complete(req, ret);
+ nvmet_req_complete(req, status);
}
static void nvmet_file_dsm_work(struct work_struct *w)
@@ -298,12 +349,12 @@
req->ns->blksize_shift);
if (unlikely(offset + len > req->ns->size)) {
- nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return;
}
ret = vfs_fallocate(req->ns->file, mode, offset, len);
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+ nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
@@ -319,10 +370,7 @@
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
- if (req->ns->buffered_io)
- req->execute = nvmet_file_execute_rw_buffered_io;
- else
- req->execute = nvmet_file_execute_rw;
+ req->execute = nvmet_file_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
@@ -341,6 +389,7 @@
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
cmd->common.opcode, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
}