// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

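/*
 * One RDMA RECV per NVMe command capsule: sge[0] always maps the 64-byte
 * command itself, while sge[1..NVMET_RDMA_MAX_INLINE_SGE] map the optional
 * inline data pages described by inline_sg.
 */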
struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct ib_qp		*qp;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

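/*
 * Number of pages spanned by a buffer of @len bytes that starts on a page
 * boundary, i.e. DIV_ROUND_UP(len, PAGE_SIZE) for len > 0.
 */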
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

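/*
 * Responses normally come from the queue's preallocated free list; under
 * memory pressure that list can run dry, in which case we fall back to a
 * one-off allocation and mark it with rsp->allocated so that
 * nvmet_rdma_put_rsp() frees it instead of returning it to the list.
 */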
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

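/*
 * Allocate and DMA-map up to inline_page_count pages per command so the
 * host can pass write data inline in the command capsule; the matching
 * SGEs are chained behind the command SGE in c->sge[].
 */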
static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

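/*
 * Re-arm a receive buffer.  With an SRQ all queues on the device share one
 * receive pool; otherwise the buffer is posted back to the owning queue's
 * own receive queue.
 */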
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

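/*
 * Retry commands that were queued because no send work requests were
 * available.  Stop at the first command that still cannot get its WRs and
 * put it back at the head of the list so ordering is preserved.
 */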
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgl(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * If the error happened before we set up the controller
		 * (e.g. an admin connect failure), there is nothing to
		 * tear down beyond the queue itself, so just disconnect
		 * and clean it up.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

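/*
 * Queue the NVMe completion for transmission.  Any RDMA WRITE work requests
 * carrying host-bound read data are chained in front of the SEND that
 * carries the CQE, and if the host asked for it the SEND also remotely
 * invalidates the host's rkey.
 */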
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

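/*
 * Map a keyed SGL descriptor: allocate a local scatterlist and set up an
 * rdma_rw context that will READ the host's data in (for writes) or WRITE
 * it back (for reads) using the remote address and rkey from the
 * descriptor.  For descriptors of the "invalidate" subtype, remember the
 * rkey so the response SEND can invalidate it remotely.
 */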
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgl(&rsp->req);
	if (ret < 0)
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

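/*
 * Each command consumes one send WR for the response plus n_rdma WRs for
 * its RDMA READ/WRITE chain.  If reserving them would overdraw the send
 * queue, back off and let the caller park the command on rsp_wr_wait_list
 * until completions return credits.
 */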
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * We get here only under memory pressure.  Silently drop
		 * the command and have the host retry, as we cannot even
		 * allocate what we would need to fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

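/*
 * Optionally set up one shared receive queue per device so that all queues
 * on the device draw receive buffers from a common pool instead of each
 * posting recv_queue_size buffers of its own.  Falls back to per-queue RQs
 * when the device has no SRQ support.
 */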
static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++) {
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return 0;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_port *port = cm_id->context;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(port->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			port->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		port->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = port->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

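/*
 * Create the CQ and RC QP backing an nvmet queue.  The CQ is sized for
 * recv_queue_size RECV completions plus up to two send-side completions
 * per command (the RDMA READ/WRITE chain and the response SEND), and I/O
 * queues are spread round-robin across the device's completion vectors.
 */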
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}
	queue->qp = queue->cm_id->qp;

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->qp);
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		/*
		 * Don't destroy the cm_id in the free path, as we already
		 * destroy it implicitly here by returning a non-zero return
		 * code to the CM callout.
		 */
		queue->cm_id = NULL;
		goto free_queue;
	}

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

free_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
		while (!list_empty(&queue->rsp_wait_list)) {
			struct nvmet_rdma_rsp *rsp;

			rsp = list_first_entry(&queue->rsp_wait_list,
					       struct nvmet_rdma_rsp,
					       wait_list);
			list_del(&rsp->wait_list);
			nvmet_rdma_put_rsp(rsp);
		}
		fallthrough;
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvme_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroy
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

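/*
 * Bring up a listener for an nvmet port: resolve the configured traddr and
 * trsvcid into a socket address, create an RDMA CM ID, and listen on it.
 * The CM ID is stashed in port->priv so remove_port can tear it down.
 */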
static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	if (port->inline_data_size < 0) {
		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
		pr_warn("inline_data_size %u is too large, reducing to %u\n",
			port->inline_data_size,
			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *port, char *traddr)
{
	struct rdma_cm_id *cm_id = port->priv;

	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
		struct nvmet_rdma_rsp *rsp =
			container_of(req, struct nvmet_rdma_rsp, req);
		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;

		sprintf(traddr, "%pISc", addr);
	} else {
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
	.disc_traddr		= nvmet_rdma_disc_port_addr,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/*
	 * IB Device that is used by nvmet controllers is being removed,
	 * delete all queues using this device.
	 */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */