1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
16#include <net/busy_poll.h>
17
18#include "nvme.h"
19#include "fabrics.h"
20
21struct nvme_tcp_queue;
22
23enum nvme_tcp_send_state {
24 NVME_TCP_SEND_CMD_PDU = 0,
25 NVME_TCP_SEND_H2C_PDU,
26 NVME_TCP_SEND_DATA,
27 NVME_TCP_SEND_DDGST,
28};
29
30struct nvme_tcp_request {
31 struct nvme_request req;
32 void *pdu;
33 struct nvme_tcp_queue *queue;
34 u32 data_len;
35 u32 pdu_len;
36 u32 pdu_sent;
37 u16 ttag;
38 struct list_head entry;
39 __le32 ddgst;
40
41 struct bio *curr_bio;
42 struct iov_iter iter;
43
44 /* send state */
45 size_t offset;
46 size_t data_sent;
47 enum nvme_tcp_send_state state;
48};
49
50enum nvme_tcp_queue_flags {
51 NVME_TCP_Q_ALLOCATED = 0,
52 NVME_TCP_Q_LIVE = 1,
53};
54
55enum nvme_tcp_recv_state {
56 NVME_TCP_RECV_PDU = 0,
57 NVME_TCP_RECV_DATA,
58 NVME_TCP_RECV_DDGST,
59};
60
61struct nvme_tcp_ctrl;
62struct nvme_tcp_queue {
63 struct socket *sock;
64 struct work_struct io_work;
65 int io_cpu;
66
67 spinlock_t lock;
68 struct list_head send_list;
69
70 /* recv state */
71 void *pdu;
72 int pdu_remaining;
73 int pdu_offset;
74 size_t data_remaining;
75 size_t ddgst_remaining;
76 unsigned int nr_cqe;
77
78 /* send state */
79 struct nvme_tcp_request *request;
80
81 int queue_size;
82 size_t cmnd_capsule_len;
83 struct nvme_tcp_ctrl *ctrl;
84 unsigned long flags;
85 bool rd_enabled;
86
87 bool hdr_digest;
88 bool data_digest;
89 struct ahash_request *rcv_hash;
90 struct ahash_request *snd_hash;
91 __le32 exp_ddgst;
92 __le32 recv_ddgst;
93
94 struct page_frag_cache pf_cache;
95
96 void (*state_change)(struct sock *);
97 void (*data_ready)(struct sock *);
98 void (*write_space)(struct sock *);
99};
100
101struct nvme_tcp_ctrl {
102 /* read only in the hot path */
103 struct nvme_tcp_queue *queues;
104 struct blk_mq_tag_set tag_set;
105
106 /* other member variables */
107 struct list_head list;
108 struct blk_mq_tag_set admin_tag_set;
109 struct sockaddr_storage addr;
110 struct sockaddr_storage src_addr;
111 struct nvme_ctrl ctrl;
112
113 struct work_struct err_work;
114 struct delayed_work connect_work;
115 struct nvme_tcp_request async_req;
116 u32 io_queues[HCTX_MAX_TYPES];
117};
118
119static LIST_HEAD(nvme_tcp_ctrl_list);
120static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
121static struct workqueue_struct *nvme_tcp_wq;
122static struct blk_mq_ops nvme_tcp_mq_ops;
123static struct blk_mq_ops nvme_tcp_admin_mq_ops;
124
125static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
126{
127 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
128}
129
130static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
131{
132 return queue - queue->ctrl->queues;
133}
134
135static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
136{
137 u32 queue_idx = nvme_tcp_queue_id(queue);
138
139 if (queue_idx == 0)
140 return queue->ctrl->admin_tag_set.tags[queue_idx];
141 return queue->ctrl->tag_set.tags[queue_idx - 1];
142}
143
144static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
145{
146 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
147}
148
149static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
150{
151 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
152}
153
154static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
155{
156 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
157}
158
159static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
160{
161 return req == &req->queue->ctrl->async_req;
162}
163
164static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
165{
166 struct request *rq;
167
168 if (unlikely(nvme_tcp_async_req(req)))
169 return false; /* async events don't have a request */
170
171 rq = blk_mq_rq_from_pdu(req);
172
173 return rq_data_dir(rq) == WRITE && req->data_len &&
174 req->data_len <= nvme_tcp_inline_data_size(req->queue);
175}
176
177static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
178{
179 return req->iter.bvec->bv_page;
180}
181
182static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
183{
184 return req->iter.bvec->bv_offset + req->iter.iov_offset;
185}
186
187static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
188{
189 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
190 req->pdu_len - req->pdu_sent);
191}
192
193static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
194{
195 return req->iter.iov_offset;
196}
197
198static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
199{
200 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
201 req->pdu_len - req->pdu_sent : 0;
202}
203
204static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
205 int len)
206{
207 return nvme_tcp_pdu_data_left(req) <= len;
208}
209
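/*
 * Set up req->iter as a bvec iterator over the request payload: either the
 * single special-payload segment (RQF_SPECIAL_PAYLOAD) or the bvecs of the
 * current bio, starting at whatever offset the bio iterator has already
 * consumed.
 */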
210static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
211 unsigned int dir)
212{
213 struct request *rq = blk_mq_rq_from_pdu(req);
214 struct bio_vec *vec;
215 unsigned int size;
216 int nsegs;
217 size_t offset;
218
219 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
220 vec = &rq->special_vec;
221 nsegs = 1;
222 size = blk_rq_payload_bytes(rq);
223 offset = 0;
224 } else {
225 struct bio *bio = req->curr_bio;
226
227 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
228 nsegs = bio_segments(bio);
229 size = bio->bi_iter.bi_size;
230 offset = bio->bi_iter.bi_bvec_done;
231 }
232
233 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
234 req->iter.iov_offset = offset;
235}
236
237static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
238 int len)
239{
240 req->data_sent += len;
241 req->pdu_sent += len;
242 iov_iter_advance(&req->iter, len);
243 if (!iov_iter_count(&req->iter) &&
244 req->data_sent < req->data_len) {
245 req->curr_bio = req->curr_bio->bi_next;
246 nvme_tcp_init_iter(req, WRITE);
247 }
248}
249
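/* Queue a request on the send list and kick the queue's io_work. */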
250static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
251{
252 struct nvme_tcp_queue *queue = req->queue;
253
254 spin_lock(&queue->lock);
255 list_add_tail(&req->entry, &queue->send_list);
256 spin_unlock(&queue->lock);
257
258 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
259}
260
261static inline struct nvme_tcp_request *
262nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
263{
264 struct nvme_tcp_request *req;
265
266 spin_lock(&queue->lock);
267 req = list_first_entry_or_null(&queue->send_list,
268 struct nvme_tcp_request, entry);
269 if (req)
270 list_del(&req->entry);
271 spin_unlock(&queue->lock);
272
273 return req;
274}
275
276static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
277 __le32 *dgst)
278{
279 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
280 crypto_ahash_final(hash);
281}
282
283static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
284 struct page *page, off_t off, size_t len)
285{
286 struct scatterlist sg;
287
288 sg_init_marker(&sg, 1);
289 sg_set_page(&sg, page, len, off);
290 ahash_request_set_crypt(hash, &sg, NULL, len);
291 crypto_ahash_update(hash);
292}
293
294static inline void nvme_tcp_hdgst(struct ahash_request *hash,
295 void *pdu, size_t len)
296{
297 struct scatterlist sg;
298
299 sg_init_one(&sg, pdu, len);
300 ahash_request_set_crypt(hash, &sg, pdu + len, len);
301 crypto_ahash_digest(hash);
302}
303
304static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
305 void *pdu, size_t pdu_len)
306{
307 struct nvme_tcp_hdr *hdr = pdu;
308 __le32 recv_digest;
309 __le32 exp_digest;
310
311 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
312 dev_err(queue->ctrl->ctrl.device,
313 "queue %d: header digest flag is cleared\n",
314 nvme_tcp_queue_id(queue));
315 return -EPROTO;
316 }
317
318 recv_digest = *(__le32 *)(pdu + hdr->hlen);
319 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
320 exp_digest = *(__le32 *)(pdu + hdr->hlen);
321 if (recv_digest != exp_digest) {
322 dev_err(queue->ctrl->ctrl.device,
323 "header digest error: recv %#x expected %#x\n",
324 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
325 return -EIO;
326 }
327
328 return 0;
329}
330
331static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
332{
333 struct nvme_tcp_hdr *hdr = pdu;
334 u8 digest_len = nvme_tcp_hdgst_len(queue);
335 u32 len;
336
337 len = le32_to_cpu(hdr->plen) - hdr->hlen -
338 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
339
340 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
341 dev_err(queue->ctrl->ctrl.device,
342 "queue %d: data digest flag is cleared\n",
343 nvme_tcp_queue_id(queue));
344 return -EPROTO;
345 }
346 crypto_ahash_init(queue->rcv_hash);
347
348 return 0;
349}
350
351static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
352 struct request *rq, unsigned int hctx_idx)
353{
354 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
355
356 page_frag_free(req->pdu);
357}
358
359static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
360 struct request *rq, unsigned int hctx_idx,
361 unsigned int numa_node)
362{
363 struct nvme_tcp_ctrl *ctrl = set->driver_data;
364 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
365 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
366 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
367 u8 hdgst = nvme_tcp_hdgst_len(queue);
368
369 req->pdu = page_frag_alloc(&queue->pf_cache,
370 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
371 GFP_KERNEL | __GFP_ZERO);
372 if (!req->pdu)
373 return -ENOMEM;
374
375 req->queue = queue;
376 nvme_req(rq)->ctrl = &ctrl->ctrl;
377
378 return 0;
379}
380
381static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
382 unsigned int hctx_idx)
383{
384 struct nvme_tcp_ctrl *ctrl = data;
385 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
386
387 hctx->driver_data = queue;
388 return 0;
389}
390
391static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
392 unsigned int hctx_idx)
393{
394 struct nvme_tcp_ctrl *ctrl = data;
395 struct nvme_tcp_queue *queue = &ctrl->queues[0];
396
397 hctx->driver_data = queue;
398 return 0;
399}
400
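/*
 * Derive the receive state from the remaining-byte counters: a PDU header
 * still being assembled takes priority, then a pending data digest, and
 * otherwise we are consuming C2H data.
 */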
401static enum nvme_tcp_recv_state
402nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
403{
404 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
405 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
406 NVME_TCP_RECV_DATA;
407}
408
409static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
410{
411 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
412 nvme_tcp_hdgst_len(queue);
413 queue->pdu_offset = 0;
414 queue->data_remaining = -1;
415 queue->ddgst_remaining = 0;
416}
417
418static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
419{
420 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
421 return;
422
423 dev_warn(ctrl->device, "starting error recovery\n");
424 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
425}
426
427static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
428 struct nvme_completion *cqe)
429{
430 struct request *rq;
431
432 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
433 if (!rq) {
434 dev_err(queue->ctrl->ctrl.device,
435 "queue %d tag 0x%x not found\n",
436 nvme_tcp_queue_id(queue), cqe->command_id);
437 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
438 return -EINVAL;
439 }
440
441 nvme_end_request(rq, cqe->status, cqe->result);
442 queue->nr_cqe++;
443
444 return 0;
445}
446
447static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
448 struct nvme_tcp_data_pdu *pdu)
449{
450 struct request *rq;
451
452 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
453 if (!rq) {
454 dev_err(queue->ctrl->ctrl.device,
455 "queue %d tag %#x not found\n",
456 nvme_tcp_queue_id(queue), pdu->command_id);
457 return -ENOENT;
458 }
459
460 if (!blk_rq_payload_bytes(rq)) {
461 dev_err(queue->ctrl->ctrl.device,
462 "queue %d tag %#x unexpected data\n",
463 nvme_tcp_queue_id(queue), rq->tag);
464 return -EIO;
465 }
466
467 queue->data_remaining = le32_to_cpu(pdu->data_length);
468
469 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
470 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
471 dev_err(queue->ctrl->ctrl.device,
472 "queue %d tag %#x SUCCESS set but not last PDU\n",
473 nvme_tcp_queue_id(queue), rq->tag);
474 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
475 return -EPROTO;
476 }
477
478 return 0;
479}
480
481static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
482 struct nvme_tcp_rsp_pdu *pdu)
483{
484 struct nvme_completion *cqe = &pdu->cqe;
485 int ret = 0;
486
487 /*
488 * AEN requests are special as they don't time out and can
489 * survive any kind of queue freeze and often don't respond to
490 * aborts. We don't even bother to allocate a struct request
491 * for them but rather special case them here.
492 */
493 if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
494 cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
495 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
496 &cqe->result);
497 else
498 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
499
500 return ret;
501}
502
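/*
 * Build the host-to-controller data PDU header in response to an R2T:
 * validate the requested length and offset against what was already sent,
 * then fill in the ttag, offset and length for the data transfer that follows.
 */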
503static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
504 struct nvme_tcp_r2t_pdu *pdu)
505{
506 struct nvme_tcp_data_pdu *data = req->pdu;
507 struct nvme_tcp_queue *queue = req->queue;
508 struct request *rq = blk_mq_rq_from_pdu(req);
509 u8 hdgst = nvme_tcp_hdgst_len(queue);
510 u8 ddgst = nvme_tcp_ddgst_len(queue);
511
512 req->pdu_len = le32_to_cpu(pdu->r2t_length);
513 req->pdu_sent = 0;
514
515 if (unlikely(!req->pdu_len)) {
516 dev_err(queue->ctrl->ctrl.device,
517 "req %d r2t len is %u, probably a bug...\n",
518 rq->tag, req->pdu_len);
519 return -EPROTO;
520 }
521
522 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
523 dev_err(queue->ctrl->ctrl.device,
524 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
525 rq->tag, req->pdu_len, req->data_len,
526 req->data_sent);
527 return -EPROTO;
528 }
529
530 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
531 dev_err(queue->ctrl->ctrl.device,
532 "req %d unexpected r2t offset %u (expected %zu)\n",
533 rq->tag, le32_to_cpu(pdu->r2t_offset),
534 req->data_sent);
535 return -EPROTO;
536 }
537
538 memset(data, 0, sizeof(*data));
539 data->hdr.type = nvme_tcp_h2c_data;
540 data->hdr.flags = NVME_TCP_F_DATA_LAST;
541 if (queue->hdr_digest)
542 data->hdr.flags |= NVME_TCP_F_HDGST;
543 if (queue->data_digest)
544 data->hdr.flags |= NVME_TCP_F_DDGST;
545 data->hdr.hlen = sizeof(*data);
546 data->hdr.pdo = data->hdr.hlen + hdgst;
547 data->hdr.plen =
548 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
549 data->ttag = pdu->ttag;
550 data->command_id = rq->tag;
551 data->data_offset = cpu_to_le32(req->data_sent);
552 data->data_length = cpu_to_le32(req->pdu_len);
553 return 0;
554}
555
556static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
557 struct nvme_tcp_r2t_pdu *pdu)
558{
559 struct nvme_tcp_request *req;
560 struct request *rq;
561 int ret;
562
563 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
564 if (!rq) {
565 dev_err(queue->ctrl->ctrl.device,
566 "queue %d tag %#x not found\n",
567 nvme_tcp_queue_id(queue), pdu->command_id);
568 return -ENOENT;
569 }
570 req = blk_mq_rq_to_pdu(rq);
571
572 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
573 if (unlikely(ret))
574 return ret;
575
576 req->state = NVME_TCP_SEND_H2C_PDU;
577 req->offset = 0;
578
579 nvme_tcp_queue_request(req);
580
581 return 0;
582}
583
584static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
585 unsigned int *offset, size_t *len)
586{
587 struct nvme_tcp_hdr *hdr;
588 char *pdu = queue->pdu;
589 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
590 int ret;
591
592 ret = skb_copy_bits(skb, *offset,
593 &pdu[queue->pdu_offset], rcv_len);
594 if (unlikely(ret))
595 return ret;
596
597 queue->pdu_remaining -= rcv_len;
598 queue->pdu_offset += rcv_len;
599 *offset += rcv_len;
600 *len -= rcv_len;
601 if (queue->pdu_remaining)
602 return 0;
603
604 hdr = queue->pdu;
605 if (queue->hdr_digest) {
606 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
607 if (unlikely(ret))
608 return ret;
609 }
610
611
612 if (queue->data_digest) {
613 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
614 if (unlikely(ret))
615 return ret;
616 }
617
618 switch (hdr->type) {
619 case nvme_tcp_c2h_data:
620 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
621 case nvme_tcp_rsp:
622 nvme_tcp_init_recv_ctx(queue);
623 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
624 case nvme_tcp_r2t:
625 nvme_tcp_init_recv_ctx(queue);
626 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
627 default:
628 dev_err(queue->ctrl->ctrl.device,
629 "unsupported pdu type (%d)\n", hdr->type);
630 return -EINVAL;
631 }
632}
633
634static inline void nvme_tcp_end_request(struct request *rq, u16 status)
635{
636 union nvme_result res = {};
637
638 nvme_end_request(rq, cpu_to_le16(status << 1), res);
639}
640
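/*
 * Copy C2H data from the skb into the request's bio pages (optionally
 * folding it into the receive data digest), advancing to the next bio when
 * the current iterator is exhausted.
 */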
641static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
642 unsigned int *offset, size_t *len)
643{
644 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
645 struct request *rq =
646 blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
647 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
648
649 while (true) {
650 int recv_len, ret;
651
652 recv_len = min_t(size_t, *len, queue->data_remaining);
653 if (!recv_len)
654 break;
655
656 if (!iov_iter_count(&req->iter)) {
657 req->curr_bio = req->curr_bio->bi_next;
658
659 /*
660 * If we don't have any bios it means that the controller
661 * sent more data than we requested, hence error
662 */
663 if (!req->curr_bio) {
664 dev_err(queue->ctrl->ctrl.device,
665 "queue %d no space in request %#x",
666 nvme_tcp_queue_id(queue), rq->tag);
667 nvme_tcp_init_recv_ctx(queue);
668 return -EIO;
669 }
670 nvme_tcp_init_iter(req, READ);
671 }
672
673 /* we can read only from what is left in this bio */
674 recv_len = min_t(size_t, recv_len,
675 iov_iter_count(&req->iter));
676
677 if (queue->data_digest)
678 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
679 &req->iter, recv_len, queue->rcv_hash);
680 else
681 ret = skb_copy_datagram_iter(skb, *offset,
682 &req->iter, recv_len);
683 if (ret) {
684 dev_err(queue->ctrl->ctrl.device,
685 "queue %d failed to copy request %#x data",
686 nvme_tcp_queue_id(queue), rq->tag);
687 return ret;
688 }
689
690 *len -= recv_len;
691 *offset += recv_len;
692 queue->data_remaining -= recv_len;
693 }
694
695 if (!queue->data_remaining) {
696 if (queue->data_digest) {
697 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
698 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
699 } else {
700 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
701 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
702 queue->nr_cqe++;
703 }
704 nvme_tcp_init_recv_ctx(queue);
705 }
706 }
707
708 return 0;
709}
710
711static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
712 struct sk_buff *skb, unsigned int *offset, size_t *len)
713{
714 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
715 char *ddgst = (char *)&queue->recv_ddgst;
716 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
717 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
718 int ret;
719
720 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
721 if (unlikely(ret))
722 return ret;
723
724 queue->ddgst_remaining -= recv_len;
725 *offset += recv_len;
726 *len -= recv_len;
727 if (queue->ddgst_remaining)
728 return 0;
729
730 if (queue->recv_ddgst != queue->exp_ddgst) {
731 dev_err(queue->ctrl->ctrl.device,
732 "data digest error: recv %#x expected %#x\n",
733 le32_to_cpu(queue->recv_ddgst),
734 le32_to_cpu(queue->exp_ddgst));
735 return -EIO;
736 }
737
738 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
739 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
740 pdu->command_id);
741
742 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
743 queue->nr_cqe++;
744 }
745
746 nvme_tcp_init_recv_ctx(queue);
747 return 0;
748}
749
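/*
 * read_sock() callback: consume the skb according to the current receive
 * state (PDU header, data, or data digest) until the skb is drained or an
 * error triggers controller error recovery.
 */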
750static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
751 unsigned int offset, size_t len)
752{
753 struct nvme_tcp_queue *queue = desc->arg.data;
754 size_t consumed = len;
755 int result;
756
757 while (len) {
758 switch (nvme_tcp_recv_state(queue)) {
759 case NVME_TCP_RECV_PDU:
760 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
761 break;
762 case NVME_TCP_RECV_DATA:
763 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
764 break;
765 case NVME_TCP_RECV_DDGST:
766 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
767 break;
768 default:
769 result = -EFAULT;
770 }
771 if (result) {
772 dev_err(queue->ctrl->ctrl.device,
773 "receive failed: %d\n", result);
774 queue->rd_enabled = false;
775 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
776 return result;
777 }
778 }
779
780 return consumed;
781}
782
783static void nvme_tcp_data_ready(struct sock *sk)
784{
785 struct nvme_tcp_queue *queue;
786
787 read_lock_bh(&sk->sk_callback_lock);
788 queue = sk->sk_user_data;
789 if (likely(queue && queue->rd_enabled))
790 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
791 read_unlock_bh(&sk->sk_callback_lock);
792}
793
794static void nvme_tcp_write_space(struct sock *sk)
795{
796 struct nvme_tcp_queue *queue;
797
798 read_lock_bh(&sk->sk_callback_lock);
799 queue = sk->sk_user_data;
800 if (likely(queue && sk_stream_is_writeable(sk))) {
801 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
802 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
803 }
804 read_unlock_bh(&sk->sk_callback_lock);
805}
806
807static void nvme_tcp_state_change(struct sock *sk)
808{
809 struct nvme_tcp_queue *queue;
810
811 read_lock_bh(&sk->sk_callback_lock);
812 queue = sk->sk_user_data;
813 if (!queue)
814 goto done;
815
816 switch (sk->sk_state) {
817 case TCP_CLOSE:
818 case TCP_CLOSE_WAIT:
819 case TCP_LAST_ACK:
820 case TCP_FIN_WAIT1:
821 case TCP_FIN_WAIT2:
822 /* fallthrough */
823 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
824 break;
825 default:
826 dev_info(queue->ctrl->ctrl.device,
827 "queue %d socket state %d\n",
828 nvme_tcp_queue_id(queue), sk->sk_state);
829 }
830
831 queue->state_change(sk);
832done:
833 read_unlock_bh(&sk->sk_callback_lock);
834}
835
836static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
837{
838 queue->request = NULL;
839}
840
841static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
842{
843 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
844}
845
846static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
847{
848 struct nvme_tcp_queue *queue = req->queue;
849
850 while (true) {
851 struct page *page = nvme_tcp_req_cur_page(req);
852 size_t offset = nvme_tcp_req_cur_offset(req);
853 size_t len = nvme_tcp_req_cur_length(req);
854 bool last = nvme_tcp_pdu_last_send(req, len);
855 int ret, flags = MSG_DONTWAIT;
856
857 if (last && !queue->data_digest)
858 flags |= MSG_EOR;
859 else
860 flags |= MSG_MORE;
861
862 if (sendpage_ok(page)) {
863 ret = kernel_sendpage(queue->sock, page, offset, len,
864 flags);
865 } else {
866 ret = sock_no_sendpage(queue->sock, page, offset, len,
867 flags);
868 }
869 if (ret <= 0)
870 return ret;
871
872 nvme_tcp_advance_req(req, ret);
873 if (queue->data_digest)
874 nvme_tcp_ddgst_update(queue->snd_hash, page,
875 offset, ret);
876
877 /* fully successful last write */
878 if (last && ret == len) {
879 if (queue->data_digest) {
880 nvme_tcp_ddgst_final(queue->snd_hash,
881 &req->ddgst);
882 req->state = NVME_TCP_SEND_DDGST;
883 req->offset = 0;
884 } else {
885 nvme_tcp_done_send_req(queue);
886 }
887 return 1;
888 }
889 }
890 return -EAGAIN;
891}
892
893static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
894{
895 struct nvme_tcp_queue *queue = req->queue;
896 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
897 bool inline_data = nvme_tcp_has_inline_data(req);
898 int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
899 u8 hdgst = nvme_tcp_hdgst_len(queue);
900 int len = sizeof(*pdu) + hdgst - req->offset;
901 int ret;
902
903 if (queue->hdr_digest && !req->offset)
904 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
905
906 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
907 offset_in_page(pdu) + req->offset, len, flags);
908 if (unlikely(ret <= 0))
909 return ret;
910
911 len -= ret;
912 if (!len) {
913 if (inline_data) {
914 req->state = NVME_TCP_SEND_DATA;
915 if (queue->data_digest)
916 crypto_ahash_init(queue->snd_hash);
917 nvme_tcp_init_iter(req, WRITE);
918 } else {
919 nvme_tcp_done_send_req(queue);
920 }
921 return 1;
922 }
923 req->offset += ret;
924
925 return -EAGAIN;
926}
927
928static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
929{
930 struct nvme_tcp_queue *queue = req->queue;
931 struct nvme_tcp_data_pdu *pdu = req->pdu;
932 u8 hdgst = nvme_tcp_hdgst_len(queue);
933 int len = sizeof(*pdu) - req->offset + hdgst;
934 int ret;
935
936 if (queue->hdr_digest && !req->offset)
937 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
938
939 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
940 offset_in_page(pdu) + req->offset, len,
941 MSG_DONTWAIT | MSG_MORE);
942 if (unlikely(ret <= 0))
943 return ret;
944
945 len -= ret;
946 if (!len) {
947 req->state = NVME_TCP_SEND_DATA;
948 if (queue->data_digest)
949 crypto_ahash_init(queue->snd_hash);
950 if (!req->data_sent)
951 nvme_tcp_init_iter(req, WRITE);
952 return 1;
953 }
954 req->offset += ret;
955
956 return -EAGAIN;
957}
958
959static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
960{
961 struct nvme_tcp_queue *queue = req->queue;
962 int ret;
963 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
964 struct kvec iov = {
965 .iov_base = &req->ddgst + req->offset,
966 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
967 };
968
969 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
970 if (unlikely(ret <= 0))
971 return ret;
972
973 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
974 nvme_tcp_done_send_req(queue);
975 return 1;
976 }
977
978 req->offset += ret;
979 return -EAGAIN;
980}
981
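/*
 * Advance the current request through the send state machine:
 * command PDU -> (optional H2C data PDU) -> data -> data digest.
 * Returns a positive value when progress was made, 0 when there is nothing
 * to send or the socket would block, and a negative errno on failure.
 */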
982static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
983{
984 struct nvme_tcp_request *req;
985 int ret = 1;
986
987 if (!queue->request) {
988 queue->request = nvme_tcp_fetch_request(queue);
989 if (!queue->request)
990 return 0;
991 }
992 req = queue->request;
993
994 if (req->state == NVME_TCP_SEND_CMD_PDU) {
995 ret = nvme_tcp_try_send_cmd_pdu(req);
996 if (ret <= 0)
997 goto done;
998 if (!nvme_tcp_has_inline_data(req))
999 return ret;
1000 }
1001
1002 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1003 ret = nvme_tcp_try_send_data_pdu(req);
1004 if (ret <= 0)
1005 goto done;
1006 }
1007
1008 if (req->state == NVME_TCP_SEND_DATA) {
1009 ret = nvme_tcp_try_send_data(req);
1010 if (ret <= 0)
1011 goto done;
1012 }
1013
1014 if (req->state == NVME_TCP_SEND_DDGST)
1015 ret = nvme_tcp_try_send_ddgst(req);
1016done:
1017 if (ret == -EAGAIN)
1018 ret = 0;
1019 return ret;
1020}
1021
1022static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1023{
1024 struct socket *sock = queue->sock;
1025 struct sock *sk = sock->sk;
1026 read_descriptor_t rd_desc;
1027 int consumed;
1028
1029 rd_desc.arg.data = queue;
1030 rd_desc.count = 1;
1031 lock_sock(sk);
1032 queue->nr_cqe = 0;
1033 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1034 release_sock(sk);
1035 return consumed;
1036}
1037
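/*
 * Per-queue I/O worker: alternate between sending queued requests and
 * receiving from the socket for roughly 1ms, then reschedule itself if
 * there is still work pending.
 */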
1038static void nvme_tcp_io_work(struct work_struct *w)
1039{
1040 struct nvme_tcp_queue *queue =
1041 container_of(w, struct nvme_tcp_queue, io_work);
1042 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1043
1044 do {
1045 bool pending = false;
1046 int result;
1047
1048 result = nvme_tcp_try_send(queue);
1049 if (result > 0) {
1050 pending = true;
1051 } else if (unlikely(result < 0)) {
1052 dev_err(queue->ctrl->ctrl.device,
1053 "failed to send request %d\n", result);
1054
1055 /*
1056 * Fail the request unless peer closed the connection,
1057 * in which case error recovery flow will complete all.
1058 */
1059 if ((result != -EPIPE) && (result != -ECONNRESET))
1060 nvme_tcp_fail_request(queue->request);
1061 nvme_tcp_done_send_req(queue);
1062 return;
1063 }
1064
1065 result = nvme_tcp_try_recv(queue);
1066 if (result > 0)
1067 pending = true;
1068
1069 if (!pending)
1070 return;
1071
1072 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1073
1074 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1075}
1076
1077static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1078{
1079 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1080
1081 ahash_request_free(queue->rcv_hash);
1082 ahash_request_free(queue->snd_hash);
1083 crypto_free_ahash(tfm);
1084}
1085
1086static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1087{
1088 struct crypto_ahash *tfm;
1089
1090 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1091 if (IS_ERR(tfm))
1092 return PTR_ERR(tfm);
1093
1094 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1095 if (!queue->snd_hash)
1096 goto free_tfm;
1097 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1098
1099 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1100 if (!queue->rcv_hash)
1101 goto free_snd_hash;
1102 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1103
1104 return 0;
1105free_snd_hash:
1106 ahash_request_free(queue->snd_hash);
1107free_tfm:
1108 crypto_free_ahash(tfm);
1109 return -ENOMEM;
1110}
1111
1112static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1113{
1114 struct nvme_tcp_request *async = &ctrl->async_req;
1115
1116 page_frag_free(async->pdu);
1117}
1118
1119static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1120{
1121 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1122 struct nvme_tcp_request *async = &ctrl->async_req;
1123 u8 hdgst = nvme_tcp_hdgst_len(queue);
1124
1125 async->pdu = page_frag_alloc(&queue->pf_cache,
1126 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1127 GFP_KERNEL | __GFP_ZERO);
1128 if (!async->pdu)
1129 return -ENOMEM;
1130
1131 async->queue = &ctrl->queues[0];
1132 return 0;
1133}
1134
1135static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1136{
1137 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1138 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1139
1140 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1141 return;
1142
1143 if (queue->hdr_digest || queue->data_digest)
1144 nvme_tcp_free_crypto(queue);
1145
1146 sock_release(queue->sock);
1147 kfree(queue->pdu);
1148}
1149
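/*
 * NVMe/TCP initialize-connection handshake: send an ICReq PDU and validate
 * the ICResp (PDU type, length, PFV, digest settings, CPDA) before the
 * queue is used for fabrics commands.
 */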
1150static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1151{
1152 struct nvme_tcp_icreq_pdu *icreq;
1153 struct nvme_tcp_icresp_pdu *icresp;
1154 struct msghdr msg = {};
1155 struct kvec iov;
1156 bool ctrl_hdgst, ctrl_ddgst;
1157 int ret;
1158
1159 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1160 if (!icreq)
1161 return -ENOMEM;
1162
1163 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1164 if (!icresp) {
1165 ret = -ENOMEM;
1166 goto free_icreq;
1167 }
1168
1169 icreq->hdr.type = nvme_tcp_icreq;
1170 icreq->hdr.hlen = sizeof(*icreq);
1171 icreq->hdr.pdo = 0;
1172 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1173 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1174 icreq->maxr2t = 0; /* single inflight r2t supported */
1175 icreq->hpda = 0; /* no alignment constraint */
1176 if (queue->hdr_digest)
1177 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1178 if (queue->data_digest)
1179 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1180
1181 iov.iov_base = icreq;
1182 iov.iov_len = sizeof(*icreq);
1183 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1184 if (ret < 0)
1185 goto free_icresp;
1186
1187 memset(&msg, 0, sizeof(msg));
1188 iov.iov_base = icresp;
1189 iov.iov_len = sizeof(*icresp);
1190 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1191 iov.iov_len, msg.msg_flags);
1192 if (ret < 0)
1193 goto free_icresp;
1194
1195 ret = -EINVAL;
1196 if (icresp->hdr.type != nvme_tcp_icresp) {
1197 pr_err("queue %d: bad type returned %d\n",
1198 nvme_tcp_queue_id(queue), icresp->hdr.type);
1199 goto free_icresp;
1200 }
1201
1202 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1203 pr_err("queue %d: bad pdu length returned %d\n",
1204 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1205 goto free_icresp;
1206 }
1207
1208 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1209 pr_err("queue %d: bad pfv returned %d\n",
1210 nvme_tcp_queue_id(queue), icresp->pfv);
1211 goto free_icresp;
1212 }
1213
1214 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1215 if ((queue->data_digest && !ctrl_ddgst) ||
1216 (!queue->data_digest && ctrl_ddgst)) {
1217 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1218 nvme_tcp_queue_id(queue),
1219 queue->data_digest ? "enabled" : "disabled",
1220 ctrl_ddgst ? "enabled" : "disabled");
1221 goto free_icresp;
1222 }
1223
1224 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1225 if ((queue->hdr_digest && !ctrl_hdgst) ||
1226 (!queue->hdr_digest && ctrl_hdgst)) {
1227 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1228 nvme_tcp_queue_id(queue),
1229 queue->hdr_digest ? "enabled" : "disabled",
1230 ctrl_hdgst ? "enabled" : "disabled");
1231 goto free_icresp;
1232 }
1233
1234 if (icresp->cpda != 0) {
1235 pr_err("queue %d: unsupported cpda returned %d\n",
1236 nvme_tcp_queue_id(queue), icresp->cpda);
1237 goto free_icresp;
1238 }
1239
1240 ret = 0;
1241free_icresp:
1242 kfree(icresp);
1243free_icreq:
1244 kfree(icreq);
1245 return ret;
1246}
1247
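/*
 * Allocate and connect one queue: create the socket, apply socket options
 * (TCP_SYNCNT, TCP_NODELAY, SO_LINGER, optional IP_TOS), connect to the
 * target, run the ICReq/ICResp handshake, then install the sk callbacks.
 */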
1248static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1249 int qid, size_t queue_size)
1250{
1251 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1252 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1253 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1254 int ret, opt, rcv_pdu_size, n;
1255
1256 queue->ctrl = ctrl;
1257 INIT_LIST_HEAD(&queue->send_list);
1258 spin_lock_init(&queue->lock);
1259 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1260 queue->queue_size = queue_size;
1261
1262 if (qid > 0)
1263 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1264 else
1265 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1266 NVME_TCP_ADMIN_CCSZ;
1267
1268 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1269 IPPROTO_TCP, &queue->sock);
1270 if (ret) {
1271 dev_err(nctrl->device,
1272 "failed to create socket: %d\n", ret);
1273 return ret;
1274 }
1275
1276 /* Single syn retry */
1277 opt = 1;
1278 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1279 (char *)&opt, sizeof(opt));
1280 if (ret) {
1281 dev_err(nctrl->device,
1282 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1283 goto err_sock;
1284 }
1285
1286 /* Set TCP no delay */
1287 opt = 1;
1288 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1289 TCP_NODELAY, (char *)&opt, sizeof(opt));
1290 if (ret) {
1291 dev_err(nctrl->device,
1292 "failed to set TCP_NODELAY sock opt %d\n", ret);
1293 goto err_sock;
1294 }
1295
1296 /*
1297 * Cleanup whatever is sitting in the TCP transmit queue on socket
1298 * close. This is done to prevent stale data from being sent should
1299 * the network connection be restored before TCP times out.
1300 */
1301 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1302 (char *)&sol, sizeof(sol));
1303 if (ret) {
1304 dev_err(nctrl->device,
1305 "failed to set SO_LINGER sock opt %d\n", ret);
1306 goto err_sock;
1307 }
1308
1309 /* Set socket type of service */
1310 if (nctrl->opts->tos >= 0) {
1311 opt = nctrl->opts->tos;
1312 ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
1313 (char *)&opt, sizeof(opt));
1314 if (ret) {
1315 dev_err(nctrl->device,
1316 "failed to set IP_TOS sock opt %d\n", ret);
1317 goto err_sock;
1318 }
1319 }
1320
1321 /* Set 10 seconds timeout for icresp recvmsg */
1322 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1323
1324 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1325 if (!qid)
1326 n = 0;
1327 else
1328 n = (qid - 1) % num_online_cpus();
1329 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1330 queue->request = NULL;
1331 queue->data_remaining = 0;
1332 queue->ddgst_remaining = 0;
1333 queue->pdu_remaining = 0;
1334 queue->pdu_offset = 0;
1335 sk_set_memalloc(queue->sock->sk);
1336
1337 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1338 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1339 sizeof(ctrl->src_addr));
1340 if (ret) {
1341 dev_err(nctrl->device,
1342 "failed to bind queue %d socket %d\n",
1343 qid, ret);
1344 goto err_sock;
1345 }
1346 }
1347
1348 queue->hdr_digest = nctrl->opts->hdr_digest;
1349 queue->data_digest = nctrl->opts->data_digest;
1350 if (queue->hdr_digest || queue->data_digest) {
1351 ret = nvme_tcp_alloc_crypto(queue);
1352 if (ret) {
1353 dev_err(nctrl->device,
1354 "failed to allocate queue %d crypto\n", qid);
1355 goto err_sock;
1356 }
1357 }
1358
1359 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1360 nvme_tcp_hdgst_len(queue);
1361 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1362 if (!queue->pdu) {
1363 ret = -ENOMEM;
1364 goto err_crypto;
1365 }
1366
1367 dev_dbg(nctrl->device, "connecting queue %d\n",
1368 nvme_tcp_queue_id(queue));
1369
1370 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1371 sizeof(ctrl->addr), 0);
1372 if (ret) {
1373 dev_err(nctrl->device,
1374 "failed to connect socket: %d\n", ret);
1375 goto err_rcv_pdu;
1376 }
1377
1378 ret = nvme_tcp_init_connection(queue);
1379 if (ret)
1380 goto err_init_connect;
1381
1382 queue->rd_enabled = true;
1383 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1384 nvme_tcp_init_recv_ctx(queue);
1385
1386 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1387 queue->sock->sk->sk_user_data = queue;
1388 queue->state_change = queue->sock->sk->sk_state_change;
1389 queue->data_ready = queue->sock->sk->sk_data_ready;
1390 queue->write_space = queue->sock->sk->sk_write_space;
1391 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1392 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1393 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1394#ifdef CONFIG_NET_RX_BUSY_POLL
1395 queue->sock->sk->sk_ll_usec = 1;
1396#endif
1397 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1398
1399 return 0;
1400
1401err_init_connect:
1402 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1403err_rcv_pdu:
1404 kfree(queue->pdu);
1405err_crypto:
1406 if (queue->hdr_digest || queue->data_digest)
1407 nvme_tcp_free_crypto(queue);
1408err_sock:
1409 sock_release(queue->sock);
1410 queue->sock = NULL;
1411 return ret;
1412}
1413
1414static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1415{
1416 struct socket *sock = queue->sock;
1417
1418 write_lock_bh(&sock->sk->sk_callback_lock);
1419 sock->sk->sk_user_data = NULL;
1420 sock->sk->sk_data_ready = queue->data_ready;
1421 sock->sk->sk_state_change = queue->state_change;
1422 sock->sk->sk_write_space = queue->write_space;
1423 write_unlock_bh(&sock->sk->sk_callback_lock);
1424}
1425
1426static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1427{
1428 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1429 nvme_tcp_restore_sock_calls(queue);
1430 cancel_work_sync(&queue->io_work);
1431}
1432
1433static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1434{
1435 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1436 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1437
1438 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1439 return;
1440 __nvme_tcp_stop_queue(queue);
1441}
1442
1443static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1444{
1445 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1446 int ret;
1447
1448 if (idx)
1449 ret = nvmf_connect_io_queue(nctrl, idx, false);
1450 else
1451 ret = nvmf_connect_admin_queue(nctrl);
1452
1453 if (!ret) {
1454 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1455 } else {
1456 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1457 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1458 dev_err(nctrl->device,
1459 "failed to connect queue: %d ret=%d\n", idx, ret);
1460 }
1461 return ret;
1462}
1463
1464static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1465 bool admin)
1466{
1467 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1468 struct blk_mq_tag_set *set;
1469 int ret;
1470
1471 if (admin) {
1472 set = &ctrl->admin_tag_set;
1473 memset(set, 0, sizeof(*set));
1474 set->ops = &nvme_tcp_admin_mq_ops;
1475 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1476 set->reserved_tags = 2; /* connect + keep-alive */
1477 set->numa_node = NUMA_NO_NODE;
1478 set->cmd_size = sizeof(struct nvme_tcp_request);
1479 set->driver_data = ctrl;
1480 set->nr_hw_queues = 1;
1481 set->timeout = ADMIN_TIMEOUT;
1482 } else {
1483 set = &ctrl->tag_set;
1484 memset(set, 0, sizeof(*set));
1485 set->ops = &nvme_tcp_mq_ops;
1486 set->queue_depth = nctrl->sqsize + 1;
1487 set->reserved_tags = 1; /* fabric connect */
1488 set->numa_node = NUMA_NO_NODE;
1489 set->flags = BLK_MQ_F_SHOULD_MERGE;
1490 set->cmd_size = sizeof(struct nvme_tcp_request);
1491 set->driver_data = ctrl;
1492 set->nr_hw_queues = nctrl->queue_count - 1;
1493 set->timeout = NVME_IO_TIMEOUT;
1494 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1495 }
1496
1497 ret = blk_mq_alloc_tag_set(set);
1498 if (ret)
1499 return ERR_PTR(ret);
1500
1501 return set;
1502}
1503
1504static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1505{
1506 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1507 cancel_work_sync(&ctrl->async_event_work);
1508 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1509 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1510 }
1511
1512 nvme_tcp_free_queue(ctrl, 0);
1513}
1514
1515static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1516{
1517 int i;
1518
1519 for (i = 1; i < ctrl->queue_count; i++)
1520 nvme_tcp_free_queue(ctrl, i);
1521}
1522
1523static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1524{
1525 int i;
1526
1527 for (i = 1; i < ctrl->queue_count; i++)
1528 nvme_tcp_stop_queue(ctrl, i);
1529}
1530
1531static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1532{
1533 int i, ret = 0;
1534
1535 for (i = 1; i < ctrl->queue_count; i++) {
1536 ret = nvme_tcp_start_queue(ctrl, i);
1537 if (ret)
1538 goto out_stop_queues;
1539 }
1540
1541 return 0;
1542
1543out_stop_queues:
1544 for (i--; i >= 1; i--)
1545 nvme_tcp_stop_queue(ctrl, i);
1546 return ret;
1547}
1548
1549static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1550{
1551 int ret;
1552
1553 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1554 if (ret)
1555 return ret;
1556
1557 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1558 if (ret)
1559 goto out_free_queue;
1560
1561 return 0;
1562
1563out_free_queue:
1564 nvme_tcp_free_queue(ctrl, 0);
1565 return ret;
1566}
1567
1568static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1569{
1570 int i, ret;
1571
1572 for (i = 1; i < ctrl->queue_count; i++) {
1573 ret = nvme_tcp_alloc_queue(ctrl, i,
1574 ctrl->sqsize + 1);
1575 if (ret)
1576 goto out_free_queues;
1577 }
1578
1579 return 0;
1580
1581out_free_queues:
1582 for (i--; i >= 1; i--)
1583 nvme_tcp_free_queue(ctrl, i);
1584
1585 return ret;
1586}
1587
1588static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1589{
1590 unsigned int nr_io_queues;
1591
1592 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1593 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1594 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1595
1596 return nr_io_queues;
1597}
1598
1599static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1600 unsigned int nr_io_queues)
1601{
1602 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1603 struct nvmf_ctrl_options *opts = nctrl->opts;
1604
1605 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1606 /*
1607 * separate read/write queues
1608 * hand out dedicated default queues only after we have
1609 * sufficient read queues.
1610 */
1611 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1612 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1613 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1614 min(opts->nr_write_queues, nr_io_queues);
1615 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1616 } else {
1617 /*
1618 * shared read/write queues
1619 * either no write queues were requested, or we don't have
1620 * sufficient queue count to have dedicated default queues.
1621 */
1622 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1623 min(opts->nr_io_queues, nr_io_queues);
1624 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1625 }
1626
1627 if (opts->nr_poll_queues && nr_io_queues) {
1628 /* map dedicated poll queues only if we have queues left */
1629 ctrl->io_queues[HCTX_TYPE_POLL] =
1630 min(opts->nr_poll_queues, nr_io_queues);
1631 }
1632}
1633
1634static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1635{
1636 unsigned int nr_io_queues;
1637 int ret;
1638
1639 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1640 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1641 if (ret)
1642 return ret;
1643
1644 if (nr_io_queues == 0) {
1645 dev_err(ctrl->device,
1646 "unable to set any I/O queues\n");
1647 return -ENOMEM;
1648 }
1649
1650 ctrl->queue_count = nr_io_queues + 1;
1651 dev_info(ctrl->device,
1652 "creating %d I/O queues.\n", nr_io_queues);
1653
1654 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1655
1656 return __nvme_tcp_alloc_io_queues(ctrl);
1657}
1658
1659static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1660{
1661 nvme_tcp_stop_io_queues(ctrl);
1662 if (remove) {
1663 blk_cleanup_queue(ctrl->connect_q);
1664 blk_mq_free_tag_set(ctrl->tagset);
1665 }
1666 nvme_tcp_free_io_queues(ctrl);
1667}
1668
1669static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1670{
1671 int ret;
1672
1673 ret = nvme_tcp_alloc_io_queues(ctrl);
1674 if (ret)
1675 return ret;
1676
1677 if (new) {
1678 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1679 if (IS_ERR(ctrl->tagset)) {
1680 ret = PTR_ERR(ctrl->tagset);
1681 goto out_free_io_queues;
1682 }
1683
1684 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1685 if (IS_ERR(ctrl->connect_q)) {
1686 ret = PTR_ERR(ctrl->connect_q);
1687 goto out_free_tag_set;
1688 }
1689 }
1690
1691 ret = nvme_tcp_start_io_queues(ctrl);
1692 if (ret)
1693 goto out_cleanup_connect_q;
1694
1695 if (!new) {
1696 nvme_start_queues(ctrl);
1697 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1698 /*
1699 * If we timed out waiting for freeze we are likely to
1700 * be stuck. Fail the controller initialization just
1701 * to be safe.
1702 */
1703 ret = -ENODEV;
1704 goto out_wait_freeze_timed_out;
1705 }
1706 blk_mq_update_nr_hw_queues(ctrl->tagset,
1707 ctrl->queue_count - 1);
1708 nvme_unfreeze(ctrl);
1709 }
1710
1711 return 0;
1712
1713out_wait_freeze_timed_out:
1714 nvme_stop_queues(ctrl);
1715 nvme_sync_io_queues(ctrl);
1716 nvme_tcp_stop_io_queues(ctrl);
1717out_cleanup_connect_q:
1718 nvme_cancel_tagset(ctrl);
1719 if (new)
1720 blk_cleanup_queue(ctrl->connect_q);
1721out_free_tag_set:
1722 if (new)
1723 blk_mq_free_tag_set(ctrl->tagset);
1724out_free_io_queues:
1725 nvme_tcp_free_io_queues(ctrl);
1726 return ret;
1727}
1728
1729static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1730{
1731 nvme_tcp_stop_queue(ctrl, 0);
1732 if (remove) {
1733 blk_cleanup_queue(ctrl->admin_q);
1734 blk_cleanup_queue(ctrl->fabrics_q);
1735 blk_mq_free_tag_set(ctrl->admin_tagset);
1736 }
1737 nvme_tcp_free_admin_queue(ctrl);
1738}
1739
1740static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1741{
1742 int error;
1743
1744 error = nvme_tcp_alloc_admin_queue(ctrl);
1745 if (error)
1746 return error;
1747
1748 if (new) {
1749 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1750 if (IS_ERR(ctrl->admin_tagset)) {
1751 error = PTR_ERR(ctrl->admin_tagset);
1752 goto out_free_queue;
1753 }
1754
1755 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1756 if (IS_ERR(ctrl->fabrics_q)) {
1757 error = PTR_ERR(ctrl->fabrics_q);
1758 goto out_free_tagset;
1759 }
1760
1761 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1762 if (IS_ERR(ctrl->admin_q)) {
1763 error = PTR_ERR(ctrl->admin_q);
1764 goto out_cleanup_fabrics_q;
1765 }
1766 }
1767
1768 error = nvme_tcp_start_queue(ctrl, 0);
1769 if (error)
1770 goto out_cleanup_queue;
1771
1772 error = nvme_enable_ctrl(ctrl);
1773 if (error)
1774 goto out_stop_queue;
1775
1776 blk_mq_unquiesce_queue(ctrl->admin_q);
1777
1778 error = nvme_init_identify(ctrl);
1779 if (error)
1780 goto out_quiesce_queue;
1781
1782 return 0;
1783
1784out_quiesce_queue:
1785 blk_mq_quiesce_queue(ctrl->admin_q);
1786 blk_sync_queue(ctrl->admin_q);
1787out_stop_queue:
1788 nvme_tcp_stop_queue(ctrl, 0);
1789 nvme_cancel_admin_tagset(ctrl);
1790out_cleanup_queue:
1791 if (new)
1792 blk_cleanup_queue(ctrl->admin_q);
1793out_cleanup_fabrics_q:
1794 if (new)
1795 blk_cleanup_queue(ctrl->fabrics_q);
1796out_free_tagset:
1797 if (new)
1798 blk_mq_free_tag_set(ctrl->admin_tagset);
1799out_free_queue:
1800 nvme_tcp_free_admin_queue(ctrl);
1801 return error;
1802}
1803
1804static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1805 bool remove)
1806{
1807 blk_mq_quiesce_queue(ctrl->admin_q);
1808 blk_sync_queue(ctrl->admin_q);
1809 nvme_tcp_stop_queue(ctrl, 0);
1810 if (ctrl->admin_tagset) {
1811 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1812 nvme_cancel_request, ctrl);
1813 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1814 }
1815 if (remove)
1816 blk_mq_unquiesce_queue(ctrl->admin_q);
1817 nvme_tcp_destroy_admin_queue(ctrl, remove);
1818}
1819
1820static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1821 bool remove)
1822{
1823 if (ctrl->queue_count <= 1)
1824 return;
1825 blk_mq_quiesce_queue(ctrl->admin_q);
1826 nvme_start_freeze(ctrl);
1827 nvme_stop_queues(ctrl);
1828 nvme_sync_io_queues(ctrl);
1829 nvme_tcp_stop_io_queues(ctrl);
1830 if (ctrl->tagset) {
1831 blk_mq_tagset_busy_iter(ctrl->tagset,
1832 nvme_cancel_request, ctrl);
1833 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1834 }
1835 if (remove)
1836 nvme_start_queues(ctrl);
1837 nvme_tcp_destroy_io_queues(ctrl, remove);
1838}
1839
1840static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1841{
1842 /* If we are resetting/deleting then do nothing */
1843 if (ctrl->state != NVME_CTRL_CONNECTING) {
1844 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1845 ctrl->state == NVME_CTRL_LIVE);
1846 return;
1847 }
1848
1849 if (nvmf_should_reconnect(ctrl)) {
1850 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1851 ctrl->opts->reconnect_delay);
1852 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1853 ctrl->opts->reconnect_delay * HZ);
1854 } else {
1855 dev_info(ctrl->device, "Removing controller...\n");
1856 nvme_delete_ctrl(ctrl);
1857 }
1858}
1859
1860static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1861{
1862 struct nvmf_ctrl_options *opts = ctrl->opts;
1863 int ret;
1864
1865 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1866 if (ret)
1867 return ret;
1868
1869 if (ctrl->icdoff) {
1870 dev_err(ctrl->device, "icdoff is not supported!\n");
1871 goto destroy_admin;
1872 }
1873
1874 if (opts->queue_size > ctrl->sqsize + 1)
1875 dev_warn(ctrl->device,
1876 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1877 opts->queue_size, ctrl->sqsize + 1);
1878
1879 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1880 dev_warn(ctrl->device,
1881 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1882 ctrl->sqsize + 1, ctrl->maxcmd);
1883 ctrl->sqsize = ctrl->maxcmd - 1;
1884 }
1885
1886 if (ctrl->queue_count > 1) {
1887 ret = nvme_tcp_configure_io_queues(ctrl, new);
1888 if (ret)
1889 goto destroy_admin;
1890 }
1891
1892 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1893 /* state change failure is ok if we're in DELETING state */
1894 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1895 ret = -EINVAL;
1896 goto destroy_io;
1897 }
1898
1899 nvme_start_ctrl(ctrl);
1900 return 0;
1901
1902destroy_io:
1903 if (ctrl->queue_count > 1) {
1904 nvme_stop_queues(ctrl);
1905 nvme_sync_io_queues(ctrl);
1906 nvme_tcp_stop_io_queues(ctrl);
1907 nvme_cancel_tagset(ctrl);
1908 nvme_tcp_destroy_io_queues(ctrl, new);
1909 }
1910destroy_admin:
1911 blk_mq_quiesce_queue(ctrl->admin_q);
1912 blk_sync_queue(ctrl->admin_q);
1913 nvme_tcp_stop_queue(ctrl, 0);
1914 nvme_cancel_admin_tagset(ctrl);
1915 nvme_tcp_destroy_admin_queue(ctrl, new);
1916 return ret;
1917}
1918
1919static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1920{
1921 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1922 struct nvme_tcp_ctrl, connect_work);
1923 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1924
1925 ++ctrl->nr_reconnects;
1926
1927 if (nvme_tcp_setup_ctrl(ctrl, false))
1928 goto requeue;
1929
1930 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1931 ctrl->nr_reconnects);
1932
1933 ctrl->nr_reconnects = 0;
1934
1935 return;
1936
1937requeue:
1938 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1939 ctrl->nr_reconnects);
1940 nvme_tcp_reconnect_or_remove(ctrl);
1941}
1942
1943static void nvme_tcp_error_recovery_work(struct work_struct *work)
1944{
1945 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1946 struct nvme_tcp_ctrl, err_work);
1947 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1948
1949 nvme_stop_keep_alive(ctrl);
1950 nvme_tcp_teardown_io_queues(ctrl, false);
1951 /* unquiesce to fail fast pending requests */
1952 nvme_start_queues(ctrl);
1953 nvme_tcp_teardown_admin_queue(ctrl, false);
1954 blk_mq_unquiesce_queue(ctrl->admin_q);
1955
1956 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1957 /* state change failure is ok if we're in DELETING state */
1958 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1959 return;
1960 }
1961
1962 nvme_tcp_reconnect_or_remove(ctrl);
1963}
1964
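/*
 * Common controller teardown for delete and reset.  @shutdown selects a clean
 * shutdown (nvme_shutdown_ctrl) versus a plain disable (nvme_disable_ctrl)
 * before the admin queue itself is torn down.
 */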
1965static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1966{
1967 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1968 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1969
1970 nvme_tcp_teardown_io_queues(ctrl, shutdown);
1971 blk_mq_quiesce_queue(ctrl->admin_q);
1972 if (shutdown)
1973 nvme_shutdown_ctrl(ctrl);
1974 else
1975 nvme_disable_ctrl(ctrl);
1976 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1977}
1978
1979static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1980{
1981 nvme_tcp_teardown_ctrl(ctrl, true);
1982}
1983
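/*
 * Reset work: stop the controller, tear it down without shutting it down,
 * and run the normal setup path again.  Setup failures fall back to the
 * reconnect-or-remove policy.
 */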
1984static void nvme_reset_ctrl_work(struct work_struct *work)
1985{
1986 struct nvme_ctrl *ctrl =
1987 container_of(work, struct nvme_ctrl, reset_work);
1988
1989 nvme_stop_ctrl(ctrl);
1990 nvme_tcp_teardown_ctrl(ctrl, false);
1991
1992 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1993 /* state change failure is ok if we're in DELETING state */
1994 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1995 return;
1996 }
1997
1998 if (nvme_tcp_setup_ctrl(ctrl, false))
1999 goto out_fail;
2000
2001 return;
2002
2003out_fail:
2004 ++ctrl->nr_reconnects;
2005 nvme_tcp_reconnect_or_remove(ctrl);
2006}
2007
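/*
 * Final release of a controller: unlink it from the global list (if it ever
 * made it there), then free the fabrics options, the queue array and the
 * controller itself.
 */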
2008static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2009{
2010 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2011
2012 if (list_empty(&ctrl->list))
2013 goto free_ctrl;
2014
2015 mutex_lock(&nvme_tcp_ctrl_mutex);
2016 list_del(&ctrl->list);
2017 mutex_unlock(&nvme_tcp_ctrl_mutex);
2018
2019 nvmf_free_options(nctrl->opts);
2020free_ctrl:
2021 kfree(ctrl->queues);
2022 kfree(ctrl);
2023}
2024
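/*
 * The TCP transport moves data in-band, so data pointers are expressed as
 * transport SGL descriptors rather than host memory addresses.
 */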
2025static void nvme_tcp_set_sg_null(struct nvme_command *c)
2026{
2027 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2028
2029 sg->addr = 0;
2030 sg->length = 0;
2031 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2032 NVME_SGL_FMT_TRANSPORT_A;
2033}
2034
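/*
 * In-capsule (inline) data: the payload follows the command PDU at the
 * controller's advertised in-capsule data offset (icdoff).
 */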
2035static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2036 struct nvme_command *c, u32 data_len)
2037{
2038 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2039
2040 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2041 sg->length = cpu_to_le32(data_len);
2042 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2043}
2044
2045static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2046 u32 data_len)
2047{
2048 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2049
2050 sg->addr = 0;
2051 sg->length = cpu_to_le32(data_len);
2052 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2053 NVME_SGL_FMT_TRANSPORT_A;
2054}
2055
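/*
 * Build the AER command PDU and queue it on the admin queue.  The async
 * request is preallocated per controller and reuses the regular send path.
 */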
2056static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2057{
2058 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2059 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2060 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2061 struct nvme_command *cmd = &pdu->cmd;
2062 u8 hdgst = nvme_tcp_hdgst_len(queue);
2063
2064 memset(pdu, 0, sizeof(*pdu));
2065 pdu->hdr.type = nvme_tcp_cmd;
2066 if (queue->hdr_digest)
2067 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2068 pdu->hdr.hlen = sizeof(*pdu);
2069 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2070
2071 cmd->common.opcode = nvme_admin_async_event;
2072 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2073 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2074 nvme_tcp_set_sg_null(cmd);
2075
2076 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2077 ctrl->async_req.offset = 0;
2078 ctrl->async_req.curr_bio = NULL;
2079 ctrl->async_req.data_len = 0;
2080
2081 nvme_tcp_queue_request(&ctrl->async_req);
2082}
2083
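/*
 * Complete a timed-out request only after its queue has been stopped, so the
 * completion cannot race with a late arrival from the wire.
 */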
Olivier Deprez0e641232021-09-23 10:07:05 +02002084static void nvme_tcp_complete_timed_out(struct request *rq)
2085{
2086 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2087 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2088
2089 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2090 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2091 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2092 blk_mq_complete_request(rq);
2093 }
2094}
2095
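/* blk-mq timeout handler for both admin and I/O requests. */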
David Brazdil0f672f62019-12-10 10:32:29 +00002096static enum blk_eh_timer_return
2097nvme_tcp_timeout(struct request *rq, bool reserved)
2098{
2099 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
Olivier Deprez0e641232021-09-23 10:07:05 +02002100 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
David Brazdil0f672f62019-12-10 10:32:29 +00002101 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2102
Olivier Deprez0e641232021-09-23 10:07:05 +02002103 dev_warn(ctrl->device,
David Brazdil0f672f62019-12-10 10:32:29 +00002104 "queue %d: timeout request %#x type %d\n",
2105 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2106
Olivier Deprez0e641232021-09-23 10:07:05 +02002107 if (ctrl->state != NVME_CTRL_LIVE) {
David Brazdil0f672f62019-12-10 10:32:29 +00002108 /*
Olivier Deprez0e641232021-09-23 10:07:05 +02002109		 * If we are resetting, connecting or deleting, we should
2110		 * complete the request immediately, as it may be blocking
2111		 * the controller teardown or setup sequence:
2112 * - ctrl disable/shutdown fabrics requests
2113 * - connect requests
2114 * - initialization admin requests
2115 * - I/O requests that entered after unquiescing and
2116 * the controller stopped responding
2117 *
2118 * All other requests should be cancelled by the error
2119 * recovery work, so it's fine that we fail it here.
David Brazdil0f672f62019-12-10 10:32:29 +00002120 */
Olivier Deprez0e641232021-09-23 10:07:05 +02002121 nvme_tcp_complete_timed_out(rq);
David Brazdil0f672f62019-12-10 10:32:29 +00002122 return BLK_EH_DONE;
2123 }
2124
Olivier Deprez0e641232021-09-23 10:07:05 +02002125 /*
2126 * LIVE state should trigger the normal error recovery which will
2127 * handle completing this request.
2128 */
2129 nvme_tcp_error_recovery(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002130 return BLK_EH_RESET_TIMER;
2131}
2132
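/*
 * Pick the SGL descriptor for a request: no data, inline write data that fits
 * in the capsule, or host data carried in separate data PDUs.
 */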
2133static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2134 struct request *rq)
2135{
2136 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2137 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2138 struct nvme_command *c = &pdu->cmd;
2139
2140 c->common.flags |= NVME_CMD_SGL_METABUF;
2141
Olivier Deprez0e641232021-09-23 10:07:05 +02002142 if (!blk_rq_nr_phys_segments(rq))
2143 nvme_tcp_set_sg_null(c);
2144 else if (rq_data_dir(rq) == WRITE &&
David Brazdil0f672f62019-12-10 10:32:29 +00002145 req->data_len <= nvme_tcp_inline_data_size(queue))
2146 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2147 else
2148 nvme_tcp_set_sg_host_data(c, req->data_len);
2149
2150 return 0;
2151}
2152
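/*
 * Initialize the command PDU and per-request send state: digest flags, PDU
 * header/payload lengths and the data offset (pdo), and set up the iov
 * iterator for requests whose payload is not sent inline.
 */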
2153static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2154 struct request *rq)
2155{
2156 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2157 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2158 struct nvme_tcp_queue *queue = req->queue;
2159 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2160 blk_status_t ret;
2161
2162 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2163 if (ret)
2164 return ret;
2165
2166 req->state = NVME_TCP_SEND_CMD_PDU;
2167 req->offset = 0;
2168 req->data_sent = 0;
2169 req->pdu_len = 0;
2170 req->pdu_sent = 0;
Olivier Deprez0e641232021-09-23 10:07:05 +02002171 req->data_len = blk_rq_nr_phys_segments(rq) ?
2172 blk_rq_payload_bytes(rq) : 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002173 req->curr_bio = rq->bio;
2174
2175 if (rq_data_dir(rq) == WRITE &&
2176 req->data_len <= nvme_tcp_inline_data_size(queue))
2177 req->pdu_len = req->data_len;
2178 else if (req->curr_bio)
2179 nvme_tcp_init_iter(req, READ);
2180
2181 pdu->hdr.type = nvme_tcp_cmd;
2182 pdu->hdr.flags = 0;
2183 if (queue->hdr_digest)
2184 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2185 if (queue->data_digest && req->pdu_len) {
2186 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2187 ddgst = nvme_tcp_ddgst_len(queue);
2188 }
2189 pdu->hdr.hlen = sizeof(*pdu);
2190 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2191 pdu->hdr.plen =
2192 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2193
2194 ret = nvme_tcp_map_data(queue, rq);
2195 if (unlikely(ret)) {
2196 nvme_cleanup_cmd(rq);
2197 dev_err(queue->ctrl->ctrl.device,
2198 "Failed to map data (%d)\n", ret);
2199 return ret;
2200 }
2201
2202 return 0;
2203}
2204
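/*
 * blk-mq ->queue_rq: fail or requeue non-ready commands via the fabrics
 * helpers, otherwise build the command PDU and hand the request to the
 * send path.
 */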
2205static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2206 const struct blk_mq_queue_data *bd)
2207{
2208 struct nvme_ns *ns = hctx->queue->queuedata;
2209 struct nvme_tcp_queue *queue = hctx->driver_data;
2210 struct request *rq = bd->rq;
2211 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2212 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2213 blk_status_t ret;
2214
2215 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2216 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2217
2218 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2219 if (unlikely(ret))
2220 return ret;
2221
2222 blk_mq_start_request(rq);
2223
2224 nvme_tcp_queue_request(req);
2225
2226 return BLK_STS_OK;
2227}
2228
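/*
 * Map blk-mq hardware contexts onto the TCP queues.  With dedicated write
 * queues, reads and writes get disjoint queue ranges; otherwise both share
 * the default range.  Dedicated poll queues, if any, are mapped last.
 */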
2229static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2230{
2231 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2232 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2233
2234 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2235 /* separate read/write queues */
2236 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2237 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2238 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2239 set->map[HCTX_TYPE_READ].nr_queues =
2240 ctrl->io_queues[HCTX_TYPE_READ];
2241 set->map[HCTX_TYPE_READ].queue_offset =
2242 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2243 } else {
2244 /* shared read/write queues */
2245 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2246 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2247 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2248 set->map[HCTX_TYPE_READ].nr_queues =
2249 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2250 set->map[HCTX_TYPE_READ].queue_offset = 0;
2251 }
2252 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2253 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2254
2255 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2256 /* map dedicated poll queues only if we have queues left */
2257 set->map[HCTX_TYPE_POLL].nr_queues =
2258 ctrl->io_queues[HCTX_TYPE_POLL];
2259 set->map[HCTX_TYPE_POLL].queue_offset =
2260 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2261 ctrl->io_queues[HCTX_TYPE_READ];
2262 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2263 }
2264
2265 dev_info(ctrl->ctrl.device,
2266 "mapped %d/%d/%d default/read/poll queues.\n",
2267 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2268 ctrl->io_queues[HCTX_TYPE_READ],
2269 ctrl->io_queues[HCTX_TYPE_POLL]);
2270
2271 return 0;
2272}
2273
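/*
 * blk-mq ->poll: busy-poll the socket when possible, then reap received
 * completions directly from this context.
 */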
2274static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2275{
2276 struct nvme_tcp_queue *queue = hctx->driver_data;
2277 struct sock *sk = queue->sock->sk;
2278
2279 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2280 sk_busy_loop(sk, true);
2281 nvme_tcp_try_recv(queue);
2282 return queue->nr_cqe;
2283}
2284
2285static struct blk_mq_ops nvme_tcp_mq_ops = {
2286 .queue_rq = nvme_tcp_queue_rq,
2287 .complete = nvme_complete_rq,
2288 .init_request = nvme_tcp_init_request,
2289 .exit_request = nvme_tcp_exit_request,
2290 .init_hctx = nvme_tcp_init_hctx,
2291 .timeout = nvme_tcp_timeout,
2292 .map_queues = nvme_tcp_map_queues,
2293 .poll = nvme_tcp_poll,
2294};
2295
2296static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2297 .queue_rq = nvme_tcp_queue_rq,
2298 .complete = nvme_complete_rq,
2299 .init_request = nvme_tcp_init_request,
2300 .exit_request = nvme_tcp_exit_request,
2301 .init_hctx = nvme_tcp_init_admin_hctx,
2302 .timeout = nvme_tcp_timeout,
2303};
2304
2305static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2306 .name = "tcp",
2307 .module = THIS_MODULE,
2308 .flags = NVME_F_FABRICS,
2309 .reg_read32 = nvmf_reg_read32,
2310 .reg_read64 = nvmf_reg_read64,
2311 .reg_write32 = nvmf_reg_write32,
2312 .free_ctrl = nvme_tcp_free_ctrl,
2313 .submit_async_event = nvme_tcp_submit_async_event,
2314 .delete_ctrl = nvme_tcp_delete_ctrl,
2315 .get_address = nvmf_get_address,
2316};
2317
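/*
 * Check whether a controller with matching address options already exists;
 * used to reject duplicate connects unless duplicate_connect was requested.
 */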
2318static bool
2319nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2320{
2321 struct nvme_tcp_ctrl *ctrl;
2322 bool found = false;
2323
2324 mutex_lock(&nvme_tcp_ctrl_mutex);
2325 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2326 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2327 if (found)
2328 break;
2329 }
2330 mutex_unlock(&nvme_tcp_ctrl_mutex);
2331
2332 return found;
2333}
2334
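/*
 * Parse the fabrics options into socket addresses, allocate and register the
 * controller, run the initial setup, and add the controller to the global
 * list.  Failures unwind through the labelled error paths below.
 */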
2335static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2336 struct nvmf_ctrl_options *opts)
2337{
2338 struct nvme_tcp_ctrl *ctrl;
2339 int ret;
2340
2341 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2342 if (!ctrl)
2343 return ERR_PTR(-ENOMEM);
2344
2345 INIT_LIST_HEAD(&ctrl->list);
2346 ctrl->ctrl.opts = opts;
2347 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2348 opts->nr_poll_queues + 1;
2349 ctrl->ctrl.sqsize = opts->queue_size - 1;
2350 ctrl->ctrl.kato = opts->kato;
2351
2352 INIT_DELAYED_WORK(&ctrl->connect_work,
2353 nvme_tcp_reconnect_ctrl_work);
2354 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2355 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2356
2357 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2358 opts->trsvcid =
2359 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2360 if (!opts->trsvcid) {
2361 ret = -ENOMEM;
2362 goto out_free_ctrl;
2363 }
2364 opts->mask |= NVMF_OPT_TRSVCID;
2365 }
2366
2367 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2368 opts->traddr, opts->trsvcid, &ctrl->addr);
2369 if (ret) {
2370 pr_err("malformed address passed: %s:%s\n",
2371 opts->traddr, opts->trsvcid);
2372 goto out_free_ctrl;
2373 }
2374
2375 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2376 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2377 opts->host_traddr, NULL, &ctrl->src_addr);
2378 if (ret) {
2379 pr_err("malformed src address passed: %s\n",
2380 opts->host_traddr);
2381 goto out_free_ctrl;
2382 }
2383 }
2384
2385 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2386 ret = -EALREADY;
2387 goto out_free_ctrl;
2388 }
2389
2390 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2391 GFP_KERNEL);
2392 if (!ctrl->queues) {
2393 ret = -ENOMEM;
2394 goto out_free_ctrl;
2395 }
2396
2397 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2398 if (ret)
2399 goto out_kfree_queues;
2400
2401 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2402 WARN_ON_ONCE(1);
2403 ret = -EINTR;
2404 goto out_uninit_ctrl;
2405 }
2406
2407 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2408 if (ret)
2409 goto out_uninit_ctrl;
2410
2411 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2412 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2413
David Brazdil0f672f62019-12-10 10:32:29 +00002414 mutex_lock(&nvme_tcp_ctrl_mutex);
2415 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2416 mutex_unlock(&nvme_tcp_ctrl_mutex);
2417
2418 return &ctrl->ctrl;
2419
2420out_uninit_ctrl:
2421 nvme_uninit_ctrl(&ctrl->ctrl);
2422 nvme_put_ctrl(&ctrl->ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002424 if (ret > 0)
2425 ret = -EIO;
2426 return ERR_PTR(ret);
2427out_kfree_queues:
2428 kfree(ctrl->queues);
2429out_free_ctrl:
2430 kfree(ctrl);
2431 return ERR_PTR(ret);
2432}
2433
2434static struct nvmf_transport_ops nvme_tcp_transport = {
2435 .name = "tcp",
2436 .module = THIS_MODULE,
2437 .required_opts = NVMF_OPT_TRADDR,
2438 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2439 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2440 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2441 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2442 NVMF_OPT_TOS,
2443 .create_ctrl = nvme_tcp_create_ctrl,
2444};
2445
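/* Allocate the shared I/O workqueue and register the "tcp" transport. */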
2446static int __init nvme_tcp_init_module(void)
2447{
2448 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2449 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2450 if (!nvme_tcp_wq)
2451 return -ENOMEM;
2452
2453 nvmf_register_transport(&nvme_tcp_transport);
2454 return 0;
2455}
2456
2457static void __exit nvme_tcp_cleanup_module(void)
2458{
2459 struct nvme_tcp_ctrl *ctrl;
2460
2461 nvmf_unregister_transport(&nvme_tcp_transport);
2462
2463 mutex_lock(&nvme_tcp_ctrl_mutex);
2464 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2465 nvme_delete_ctrl(&ctrl->ctrl);
2466 mutex_unlock(&nvme_tcp_ctrl_mutex);
2467 flush_workqueue(nvme_delete_wq);
2468
2469 destroy_workqueue(nvme_tcp_wq);
2470}
2471
2472module_init(nvme_tcp_init_module);
2473module_exit(nvme_tcp_cleanup_module);
2474
2475MODULE_LICENSE("GPL v2");