// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16

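/*
 * Tear down the file-backed namespace: drain any pending buffered-io work,
 * release the bvec mempool and slab cache, and drop the file reference.
 */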
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

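/*
 * Open the backing file, record its size and a capped block size shift,
 * and set up the bvec slab cache and mempool used as an allocation
 * fallback under memory pressure.
 */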
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

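/*
 * Build an iov_iter over the request's bvec array and issue it through the
 * backing file's ->read_iter/->write_iter. FUA writes get IOCB_DSYNC added.
 * Returns whatever the iter method returns (e.g. -EIOCBQUEUED when queued
 * asynchronously).
 */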
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

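/*
 * AIO completion handler (also called directly for synchronous I/O): free
 * the bvec array unless it is the inline one, then complete the request,
 * mapping a short or failed transfer to an NVMe status code.
 */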
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->data_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

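/*
 * Map the request's scatterlist onto the bvec array and submit it. When the
 * bvecs come from the mempool and the request needs more than
 * NVMET_MAX_MPOOL_BVEC of them, the transfer is split into synchronous
 * chunks of at most NVMET_MAX_MPOOL_BVEC segments. Returns false if the
 * caller should retry without IOCB_NOWAIT, true if the request was queued
 * or completed.
 */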
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->data_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

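/*
 * Read/Write handler: pick the bvec array (inline, kmalloc'ed, or the
 * mempool as a last resort), then execute directly for O_DIRECT namespaces,
 * or try IOCB_NOWAIT first and fall back to the buffered-io workqueue.
 */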
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
				nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

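/*
 * Deallocate (discard) handler: punch a hole for each DSM range after
 * bounds-checking it against the namespace size.
 */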
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

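/*
 * Write Zeroes handler: zero the LBA range via FALLOC_FL_ZERO_RANGE without
 * changing the file size, after bounds-checking against the namespace size.
 */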
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

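/*
 * I/O command parser for file-backed namespaces: set the execute handler and
 * expected data length per opcode, or fail with Invalid Opcode.
 */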
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
			cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}