// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

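/*
 * Initialize the request's bvec iterator, either over the single special
 * payload vector or over the segments of the current bio, so the send and
 * receive paths can walk the request data page by page.
 */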
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, try to send directly;
	 * otherwise queue io_work. Only do that if we are on the same cpu,
	 * so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

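/*
 * Splice the lockless req_list onto send_list. The llist is LIFO, so
 * re-adding each entry at the head of send_list restores the original
 * submission order for the sending path.
 */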
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

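/*
 * Build the H2CData PDU that answers a controller R2T, after validating
 * that the requested length and offset are consistent with what has
 * already been sent for this request.
 */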
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(!req->pdu_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, req->pdu_len);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = pdu->r2t_offset;
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

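/*
 * Copy C2HData payload from the skb into the request's bio iterator,
 * updating the receive-side data digest when it is enabled. Completes the
 * request in-line if the controller set the SUCCESS flag on the last PDU.
 */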
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

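/*
 * ->read_sock() callback: dispatch incoming bytes according to the
 * per-queue receive state machine (PDU header, data, then data digest).
 */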
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

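/*
 * Socket callbacks installed by nvme_tcp_alloc_queue(). They kick io_work
 * on the queue's CPU when data arrives, write space frees up, or the
 * connection changes state.
 */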
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}

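/*
 * Send the request payload page by page with kernel_sendpage(), keeping
 * MSG_MORE/MSG_SENDPAGE_NOTLAST set while more data (or a digest) will
 * follow on this queue.
 */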
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

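/*
 * Per-queue I/O worker: alternate between sending and receiving for up to
 * a 1ms quota, then requeue itself if there is still work pending.
 */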
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->queue_lock);
}

static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ] +
			  ctrl->io_queues[HCTX_TYPE_POLL];
}

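/*
 * Pin the queue's io_work to a CPU derived from its index within its own
 * queue type (default/read/poll), so the three queue classes spread
 * evenly over the online CPUs.
 */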
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

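/*
 * Allocate and connect the queue's TCP socket, negotiate the connection
 * (ICReq/ICResp), and install the nvme-tcp socket callbacks.
 */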
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		goto err_destroy_mutex;
	}

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	/* Set 10 seconds timeout for icresp recvmsg */
	queue->sock->sk->sk_rcvtimeo = 10 * HZ;

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->queue_lock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx, false);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

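/*
 * Set up the blk-mq tag set for either the admin queue (admin == true) or
 * the I/O queues, reserving tags for the fabrics connect (and keep-alive)
 * commands.
 */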
static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = 1; /* fabric connect */
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		cancel_work_sync(&ctrl->async_event_work);
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

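/*
 * Ask for one queue per online CPU for each requested queue type; the
 * controller may grant fewer, which nvme_tcp_alloc_io_queues() handles
 * via nvme_set_queue_count().
 */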
1712static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1713{
1714 unsigned int nr_io_queues;
1715
1716 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1717 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1718 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1719
1720 return nr_io_queues;
1721}
1722
1723static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1724 unsigned int nr_io_queues)
1725{
1726 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1727 struct nvmf_ctrl_options *opts = nctrl->opts;
1728
1729 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1730 /*
1731 * separate read/write queues
1732 * hand out dedicated default queues only after we have
1733 * sufficient read queues.
1734 */
1735 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1736 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1737 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1738 min(opts->nr_write_queues, nr_io_queues);
1739 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1740 } else {
1741 /*
1742 * shared read/write queues
1743 * either no write queues were requested, or we don't have
1744 * sufficient queue count to have dedicated default queues.
1745 */
1746 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1747 min(opts->nr_io_queues, nr_io_queues);
1748 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1749 }
1750
1751 if (opts->nr_poll_queues && nr_io_queues) {
1752 /* map dedicated poll queues only if we have queues left */
1753 ctrl->io_queues[HCTX_TYPE_POLL] =
1754 min(opts->nr_poll_queues, nr_io_queues);
1755 }
1756}
1757
1758static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1759{
1760 unsigned int nr_io_queues;
1761 int ret;
1762
1763 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1764 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1765 if (ret)
1766 return ret;
1767
Olivier Deprez0e641232021-09-23 10:07:05 +02001768 if (nr_io_queues == 0) {
1769 dev_err(ctrl->device,
1770 "unable to set any I/O queues\n");
1771 return -ENOMEM;
1772 }
David Brazdil0f672f62019-12-10 10:32:29 +00001773
Olivier Deprez0e641232021-09-23 10:07:05 +02001774 ctrl->queue_count = nr_io_queues + 1;
David Brazdil0f672f62019-12-10 10:32:29 +00001775 dev_info(ctrl->device,
1776 "creating %d I/O queues.\n", nr_io_queues);
1777
1778 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1779
1780 return __nvme_tcp_alloc_io_queues(ctrl);
1781}
1782
1783static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1784{
1785 nvme_tcp_stop_io_queues(ctrl);
1786 if (remove) {
1787 blk_cleanup_queue(ctrl->connect_q);
1788 blk_mq_free_tag_set(ctrl->tagset);
1789 }
1790 nvme_tcp_free_io_queues(ctrl);
1791}
1792
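/*
 * Allocate and start the I/O queues.  For a brand new association this also
 * creates the I/O tag set and the connect request queue.  On the
 * reconnect/reset path the namespace queues are unquiesced, we wait for the
 * earlier freeze to complete, nr_hw_queues is updated to the new queue count
 * and the controller is unfrozen; a freeze-wait timeout is treated as fatal
 * for this connection attempt.
 */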
1793static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1794{
1795 int ret;
1796
1797 ret = nvme_tcp_alloc_io_queues(ctrl);
1798 if (ret)
1799 return ret;
1800
1801 if (new) {
1802 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1803 if (IS_ERR(ctrl->tagset)) {
1804 ret = PTR_ERR(ctrl->tagset);
1805 goto out_free_io_queues;
1806 }
1807
1808 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1809 if (IS_ERR(ctrl->connect_q)) {
1810 ret = PTR_ERR(ctrl->connect_q);
1811 goto out_free_tag_set;
1812 }
David Brazdil0f672f62019-12-10 10:32:29 +00001813 }
1814
1815 ret = nvme_tcp_start_io_queues(ctrl);
1816 if (ret)
1817 goto out_cleanup_connect_q;
1818
Olivier Deprez0e641232021-09-23 10:07:05 +02001819 if (!new) {
1820 nvme_start_queues(ctrl);
1821 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1822 /*
1823 * If we timed out waiting for freeze we are likely to
1824 * be stuck. Fail the controller initialization just
1825 * to be safe.
1826 */
1827 ret = -ENODEV;
1828 goto out_wait_freeze_timed_out;
1829 }
1830 blk_mq_update_nr_hw_queues(ctrl->tagset,
1831 ctrl->queue_count - 1);
1832 nvme_unfreeze(ctrl);
1833 }
1834
David Brazdil0f672f62019-12-10 10:32:29 +00001835 return 0;
1836
Olivier Deprez0e641232021-09-23 10:07:05 +02001837out_wait_freeze_timed_out:
1838 nvme_stop_queues(ctrl);
1839 nvme_sync_io_queues(ctrl);
1840 nvme_tcp_stop_io_queues(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00001841out_cleanup_connect_q:
Olivier Deprez0e641232021-09-23 10:07:05 +02001842 nvme_cancel_tagset(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00001843 if (new)
1844 blk_cleanup_queue(ctrl->connect_q);
1845out_free_tag_set:
1846 if (new)
1847 blk_mq_free_tag_set(ctrl->tagset);
1848out_free_io_queues:
1849 nvme_tcp_free_io_queues(ctrl);
1850 return ret;
1851}
1852
1853static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1854{
1855 nvme_tcp_stop_queue(ctrl, 0);
1856 if (remove) {
1857 blk_cleanup_queue(ctrl->admin_q);
1858 blk_cleanup_queue(ctrl->fabrics_q);
1859 blk_mq_free_tag_set(ctrl->admin_tagset);
1860 }
1861 nvme_tcp_free_admin_queue(ctrl);
1862}
1863
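/*
 * Bring up the admin side: allocate queue 0 together with the async event
 * request, optionally create the admin tag set plus the fabrics and admin
 * request queues, start the queue, enable the controller and run controller
 * identification.  Failures unwind in reverse order.
 */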
1864static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1865{
1866 int error;
1867
1868 error = nvme_tcp_alloc_admin_queue(ctrl);
1869 if (error)
1870 return error;
1871
1872 if (new) {
1873 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1874 if (IS_ERR(ctrl->admin_tagset)) {
1875 error = PTR_ERR(ctrl->admin_tagset);
1876 goto out_free_queue;
1877 }
1878
1879 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1880 if (IS_ERR(ctrl->fabrics_q)) {
1881 error = PTR_ERR(ctrl->fabrics_q);
1882 goto out_free_tagset;
1883 }
1884
1885 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1886 if (IS_ERR(ctrl->admin_q)) {
1887 error = PTR_ERR(ctrl->admin_q);
1888 goto out_cleanup_fabrics_q;
1889 }
1890 }
1891
1892 error = nvme_tcp_start_queue(ctrl, 0);
1893 if (error)
1894 goto out_cleanup_queue;
1895
1896 error = nvme_enable_ctrl(ctrl);
1897 if (error)
1898 goto out_stop_queue;
1899
1900 blk_mq_unquiesce_queue(ctrl->admin_q);
1901
1902 error = nvme_init_identify(ctrl);
1903 if (error)
Olivier Deprez0e641232021-09-23 10:07:05 +02001904 goto out_quiesce_queue;
David Brazdil0f672f62019-12-10 10:32:29 +00001905
1906 return 0;
1907
Olivier Deprez0e641232021-09-23 10:07:05 +02001908out_quiesce_queue:
1909 blk_mq_quiesce_queue(ctrl->admin_q);
1910 blk_sync_queue(ctrl->admin_q);
David Brazdil0f672f62019-12-10 10:32:29 +00001911out_stop_queue:
1912 nvme_tcp_stop_queue(ctrl, 0);
Olivier Deprez0e641232021-09-23 10:07:05 +02001913 nvme_cancel_admin_tagset(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00001914out_cleanup_queue:
1915 if (new)
1916 blk_cleanup_queue(ctrl->admin_q);
1917out_cleanup_fabrics_q:
1918 if (new)
1919 blk_cleanup_queue(ctrl->fabrics_q);
1920out_free_tagset:
1921 if (new)
1922 blk_mq_free_tag_set(ctrl->admin_tagset);
1923out_free_queue:
1924 nvme_tcp_free_admin_queue(ctrl);
1925 return error;
1926}
1927
1928static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1929 bool remove)
1930{
1931 blk_mq_quiesce_queue(ctrl->admin_q);
Olivier Deprez0e641232021-09-23 10:07:05 +02001932 blk_sync_queue(ctrl->admin_q);
David Brazdil0f672f62019-12-10 10:32:29 +00001933 nvme_tcp_stop_queue(ctrl, 0);
1934 if (ctrl->admin_tagset) {
1935 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1936 nvme_cancel_request, ctrl);
1937 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1938 }
1939 if (remove)
1940 blk_mq_unquiesce_queue(ctrl->admin_q);
1941 nvme_tcp_destroy_admin_queue(ctrl, remove);
1942}
1943
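/*
 * Tear down the I/O side: freeze and quiesce the namespace queues, stop the
 * TCP I/O queues, cancel any requests still outstanding on the I/O tag set
 * and free the queues.  With @remove set the namespace queues are restarted
 * and the tag set and connect queue are released as well.
 */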
1944static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1945 bool remove)
1946{
1947 if (ctrl->queue_count <= 1)
1948 return;
Olivier Deprez0e641232021-09-23 10:07:05 +02001949 blk_mq_quiesce_queue(ctrl->admin_q);
1950 nvme_start_freeze(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00001951 nvme_stop_queues(ctrl);
Olivier Deprez0e641232021-09-23 10:07:05 +02001952 nvme_sync_io_queues(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00001953 nvme_tcp_stop_io_queues(ctrl);
1954 if (ctrl->tagset) {
1955 blk_mq_tagset_busy_iter(ctrl->tagset,
1956 nvme_cancel_request, ctrl);
1957 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1958 }
1959 if (remove)
1960 nvme_start_queues(ctrl);
1961 nvme_tcp_destroy_io_queues(ctrl, remove);
1962}
1963
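/*
 * Decide what happens after a failed association: if the controller is still
 * in CONNECTING state, either schedule another reconnect attempt after
 * reconnect_delay seconds or, if no further reconnects are allowed, delete
 * the controller.
 */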
1964static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1965{
1966 /* If we are resetting/deleting then do nothing */
1967 if (ctrl->state != NVME_CTRL_CONNECTING) {
1968 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1969 ctrl->state == NVME_CTRL_LIVE);
1970 return;
1971 }
1972
1973 if (nvmf_should_reconnect(ctrl)) {
1974 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1975 ctrl->opts->reconnect_delay);
1976 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1977 ctrl->opts->reconnect_delay * HZ);
1978 } else {
1979 dev_info(ctrl->device, "Removing controller...\n");
1980 nvme_delete_ctrl(ctrl);
1981 }
1982}
1983
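/*
 * Establish (or re-establish) the association: configure the admin queue,
 * validate and clamp queue parameters against the controller's capabilities,
 * bring up the I/O queues when any were requested and move the controller to
 * LIVE.  On failure the I/O and admin queues are torn down again.
 */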
1984static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1985{
1986 struct nvmf_ctrl_options *opts = ctrl->opts;
1987 int ret;
1988
1989 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1990 if (ret)
1991 return ret;
1992
1993 if (ctrl->icdoff) {
1994 dev_err(ctrl->device, "icdoff is not supported!\n");
        ret = -EOPNOTSUPP;
1995 goto destroy_admin;
1996 }
1997
1998 if (opts->queue_size > ctrl->sqsize + 1)
1999 dev_warn(ctrl->device,
2000 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2001 opts->queue_size, ctrl->sqsize + 1);
2002
2003 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2004 dev_warn(ctrl->device,
2005 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2006 ctrl->sqsize + 1, ctrl->maxcmd);
2007 ctrl->sqsize = ctrl->maxcmd - 1;
2008 }
2009
2010 if (ctrl->queue_count > 1) {
2011 ret = nvme_tcp_configure_io_queues(ctrl, new);
2012 if (ret)
2013 goto destroy_admin;
2014 }
2015
2016 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002017 /*
2018 * state change failure is ok if we started ctrl delete,
2019 * unless we are in the middle of creating a new controller,
2020 * to avoid racing with the teardown flow.
2021 */
2022 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2023 ctrl->state != NVME_CTRL_DELETING_NOIO);
2024 WARN_ON_ONCE(new);
David Brazdil0f672f62019-12-10 10:32:29 +00002025 ret = -EINVAL;
2026 goto destroy_io;
2027 }
2028
2029 nvme_start_ctrl(ctrl);
2030 return 0;
2031
2032destroy_io:
Olivier Deprez0e641232021-09-23 10:07:05 +02002033 if (ctrl->queue_count > 1) {
2034 nvme_stop_queues(ctrl);
2035 nvme_sync_io_queues(ctrl);
2036 nvme_tcp_stop_io_queues(ctrl);
2037 nvme_cancel_tagset(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002038 nvme_tcp_destroy_io_queues(ctrl, new);
Olivier Deprez0e641232021-09-23 10:07:05 +02002039 }
David Brazdil0f672f62019-12-10 10:32:29 +00002040destroy_admin:
Olivier Deprez0e641232021-09-23 10:07:05 +02002041 blk_mq_quiesce_queue(ctrl->admin_q);
2042 blk_sync_queue(ctrl->admin_q);
David Brazdil0f672f62019-12-10 10:32:29 +00002043 nvme_tcp_stop_queue(ctrl, 0);
Olivier Deprez0e641232021-09-23 10:07:05 +02002044 nvme_cancel_admin_tagset(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002045 nvme_tcp_destroy_admin_queue(ctrl, new);
2046 return ret;
2047}
2048
2049static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2050{
2051 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2052 struct nvme_tcp_ctrl, connect_work);
2053 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2054
2055 ++ctrl->nr_reconnects;
2056
2057 if (nvme_tcp_setup_ctrl(ctrl, false))
2058 goto requeue;
2059
2060 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2061 ctrl->nr_reconnects);
2062
2063 ctrl->nr_reconnects = 0;
2064
2065 return;
2066
2067requeue:
2068 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2069 ctrl->nr_reconnects);
2070 nvme_tcp_reconnect_or_remove(ctrl);
2071}
2072
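/*
 * Error recovery: stop keep-alive, flush pending async event work, tear down
 * the I/O and admin queues while fast-failing anything still pending, and,
 * if the controller can be moved to CONNECTING, either schedule a reconnect
 * or remove the controller.
 */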
2073static void nvme_tcp_error_recovery_work(struct work_struct *work)
2074{
2075 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2076 struct nvme_tcp_ctrl, err_work);
2077 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2078
2079 nvme_stop_keep_alive(ctrl);
Olivier Deprez157378f2022-04-04 15:47:50 +02002080 flush_work(&ctrl->async_event_work);
David Brazdil0f672f62019-12-10 10:32:29 +00002081 nvme_tcp_teardown_io_queues(ctrl, false);
2082 /* unquiesce to fast-fail pending requests */
2083 nvme_start_queues(ctrl);
2084 nvme_tcp_teardown_admin_queue(ctrl, false);
2085 blk_mq_unquiesce_queue(ctrl->admin_q);
2086
2087 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002088 /* state change failure is ok if we started ctrl delete */
2089 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2090 ctrl->state != NVME_CTRL_DELETING_NOIO);
David Brazdil0f672f62019-12-10 10:32:29 +00002091 return;
2092 }
2093
2094 nvme_tcp_reconnect_or_remove(ctrl);
2095}
2096
2097static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2098{
2099 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2100 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2101
2102 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2103 blk_mq_quiesce_queue(ctrl->admin_q);
2104 if (shutdown)
2105 nvme_shutdown_ctrl(ctrl);
2106 else
2107 nvme_disable_ctrl(ctrl);
2108 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2109}
2110
2111static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2112{
2113 nvme_tcp_teardown_ctrl(ctrl, true);
2114}
2115
2116static void nvme_reset_ctrl_work(struct work_struct *work)
2117{
2118 struct nvme_ctrl *ctrl =
2119 container_of(work, struct nvme_ctrl, reset_work);
2120
2121 nvme_stop_ctrl(ctrl);
2122 nvme_tcp_teardown_ctrl(ctrl, false);
2123
2124 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002125 /* state change failure is ok if we started ctrl delete */
2126 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2127 ctrl->state != NVME_CTRL_DELETING_NOIO);
David Brazdil0f672f62019-12-10 10:32:29 +00002128 return;
2129 }
2130
2131 if (nvme_tcp_setup_ctrl(ctrl, false))
2132 goto out_fail;
2133
2134 return;
2135
2136out_fail:
2137 ++ctrl->nr_reconnects;
2138 nvme_tcp_reconnect_or_remove(ctrl);
2139}
2140
2141static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2142{
2143 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2144
2145 if (list_empty(&ctrl->list))
2146 goto free_ctrl;
2147
2148 mutex_lock(&nvme_tcp_ctrl_mutex);
2149 list_del(&ctrl->list);
2150 mutex_unlock(&nvme_tcp_ctrl_mutex);
2151
2152 nvmf_free_options(nctrl->opts);
2153free_ctrl:
2154 kfree(ctrl->queues);
2155 kfree(ctrl);
2156}
2157
2158static void nvme_tcp_set_sg_null(struct nvme_command *c)
2159{
2160 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2161
2162 sg->addr = 0;
2163 sg->length = 0;
2164 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2165 NVME_SGL_FMT_TRANSPORT_A;
2166}
2167
2168static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2169 struct nvme_command *c, u32 data_len)
2170{
2171 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2172
2173 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2174 sg->length = cpu_to_le32(data_len);
2175 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2176}
2177
2178static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2179 u32 data_len)
2180{
2181 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2182
2183 sg->addr = 0;
2184 sg->length = cpu_to_le32(data_len);
2185 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2186 NVME_SGL_FMT_TRANSPORT_A;
2187}
2188
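/*
 * Queue the asynchronous event request (AER) command on the admin queue.
 * It uses the pre-allocated async request PDU and the reserved command id
 * NVME_AQ_BLK_MQ_DEPTH rather than a block layer request.
 */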
2189static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2190{
2191 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2192 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2193 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2194 struct nvme_command *cmd = &pdu->cmd;
2195 u8 hdgst = nvme_tcp_hdgst_len(queue);
2196
2197 memset(pdu, 0, sizeof(*pdu));
2198 pdu->hdr.type = nvme_tcp_cmd;
2199 if (queue->hdr_digest)
2200 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2201 pdu->hdr.hlen = sizeof(*pdu);
2202 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2203
2204 cmd->common.opcode = nvme_admin_async_event;
2205 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2206 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2207 nvme_tcp_set_sg_null(cmd);
2208
2209 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2210 ctrl->async_req.offset = 0;
2211 ctrl->async_req.curr_bio = NULL;
2212 ctrl->async_req.data_len = 0;
2213
Olivier Deprez157378f2022-04-04 15:47:50 +02002214 nvme_tcp_queue_request(&ctrl->async_req, true, true);
David Brazdil0f672f62019-12-10 10:32:29 +00002215}
2216
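/*
 * Complete a timed out request directly: stop the queue it was issued on so
 * the socket can no longer touch it, then complete it with
 * NVME_SC_HOST_ABORTED_CMD unless it already completed in the meantime.
 */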
Olivier Deprez0e641232021-09-23 10:07:05 +02002217static void nvme_tcp_complete_timed_out(struct request *rq)
2218{
2219 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2220 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2221
2222 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2223 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2224 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2225 blk_mq_complete_request(rq);
2226 }
2227}
2228
David Brazdil0f672f62019-12-10 10:32:29 +00002229static enum blk_eh_timer_return
2230nvme_tcp_timeout(struct request *rq, bool reserved)
2231{
2232 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
Olivier Deprez0e641232021-09-23 10:07:05 +02002233 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
David Brazdil0f672f62019-12-10 10:32:29 +00002234 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2235
Olivier Deprez0e641232021-09-23 10:07:05 +02002236 dev_warn(ctrl->device,
David Brazdil0f672f62019-12-10 10:32:29 +00002237 "queue %d: timeout request %#x type %d\n",
2238 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2239
Olivier Deprez0e641232021-09-23 10:07:05 +02002240 if (ctrl->state != NVME_CTRL_LIVE) {
David Brazdil0f672f62019-12-10 10:32:29 +00002241 /*
Olivier Deprez0e641232021-09-23 10:07:05 +02002242 * If we are resetting, connecting or deleting we should
2243 * complete immediately because we may block controller
2244 * teardown or setup sequence
2245 * - ctrl disable/shutdown fabrics requests
2246 * - connect requests
2247 * - initialization admin requests
2248 * - I/O requests that entered after unquiescing and
2249 * the controller stopped responding
2250 *
2251 * All other requests should be cancelled by the error
2252 * recovery work, so it's fine that we fail it here.
David Brazdil0f672f62019-12-10 10:32:29 +00002253 */
Olivier Deprez0e641232021-09-23 10:07:05 +02002254 nvme_tcp_complete_timed_out(rq);
David Brazdil0f672f62019-12-10 10:32:29 +00002255 return BLK_EH_DONE;
2256 }
2257
Olivier Deprez0e641232021-09-23 10:07:05 +02002258 /*
2259 * LIVE state should trigger the normal error recovery which will
2260 * handle completing this request.
2261 */
2262 nvme_tcp_error_recovery(ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002263 return BLK_EH_RESET_TIMER;
2264}
2265
2266static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2267 struct request *rq)
2268{
2269 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2270 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2271 struct nvme_command *c = &pdu->cmd;
2272
2273 c->common.flags |= NVME_CMD_SGL_METABUF;
2274
Olivier Deprez0e641232021-09-23 10:07:05 +02002275 if (!blk_rq_nr_phys_segments(rq))
2276 nvme_tcp_set_sg_null(c);
2277 else if (rq_data_dir(rq) == WRITE &&
David Brazdil0f672f62019-12-10 10:32:29 +00002278 req->data_len <= nvme_tcp_inline_data_size(queue))
2279 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2280 else
2281 nvme_tcp_set_sg_host_data(c, req->data_len);
2282
2283 return 0;
2284}
2285
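/*
 * Build the command PDU for a request: set up the NVMe command, initialize
 * the send state, decide between inline data, host-resident data or no data,
 * and fill in the PDU header (digest flags, header length, data offset and
 * total PDU length) before mapping the data descriptor.
 */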
2286static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2287 struct request *rq)
2288{
2289 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2290 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2291 struct nvme_tcp_queue *queue = req->queue;
2292 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2293 blk_status_t ret;
2294
2295 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2296 if (ret)
2297 return ret;
2298
2299 req->state = NVME_TCP_SEND_CMD_PDU;
2300 req->offset = 0;
2301 req->data_sent = 0;
2302 req->pdu_len = 0;
2303 req->pdu_sent = 0;
Olivier Deprez0e641232021-09-23 10:07:05 +02002304 req->data_len = blk_rq_nr_phys_segments(rq) ?
2305 blk_rq_payload_bytes(rq) : 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002306 req->curr_bio = rq->bio;
2307
2308 if (rq_data_dir(rq) == WRITE &&
2309 req->data_len <= nvme_tcp_inline_data_size(queue))
2310 req->pdu_len = req->data_len;
2311 else if (req->curr_bio)
2312 nvme_tcp_init_iter(req, READ);
2313
2314 pdu->hdr.type = nvme_tcp_cmd;
2315 pdu->hdr.flags = 0;
2316 if (queue->hdr_digest)
2317 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2318 if (queue->data_digest && req->pdu_len) {
2319 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2320 ddgst = nvme_tcp_ddgst_len(queue);
2321 }
2322 pdu->hdr.hlen = sizeof(*pdu);
2323 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2324 pdu->hdr.plen =
2325 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2326
2327 ret = nvme_tcp_map_data(queue, rq);
2328 if (unlikely(ret)) {
2329 nvme_cleanup_cmd(rq);
2330 dev_err(queue->ctrl->ctrl.device,
2331 "Failed to map data (%d)\n", ret);
2332 return ret;
2333 }
2334
2335 return 0;
2336}
2337
Olivier Deprez157378f2022-04-04 15:47:50 +02002338static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2339{
2340 struct nvme_tcp_queue *queue = hctx->driver_data;
2341
2342 if (!llist_empty(&queue->req_list))
2343 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2344}
2345
David Brazdil0f672f62019-12-10 10:32:29 +00002346static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2347 const struct blk_mq_queue_data *bd)
2348{
2349 struct nvme_ns *ns = hctx->queue->queuedata;
2350 struct nvme_tcp_queue *queue = hctx->driver_data;
2351 struct request *rq = bd->rq;
2352 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2353 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2354 blk_status_t ret;
2355
2356 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2357 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2358
2359 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2360 if (unlikely(ret))
2361 return ret;
2362
2363 blk_mq_start_request(rq);
2364
Olivier Deprez157378f2022-04-04 15:47:50 +02002365 nvme_tcp_queue_request(req, true, bd->last);
David Brazdil0f672f62019-12-10 10:32:29 +00002366
2367 return BLK_STS_OK;
2368}
2369
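/*
 * Map the blk-mq hardware context types onto the queue ranges computed by
 * nvme_tcp_set_io_queues(): default (write) queues first, then read queues
 * (or an overlapping range at offset 0 when read/write queues are shared),
 * with any poll queues at the tail.
 */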
2370static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2371{
2372 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2373 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2374
2375 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2376 /* separate read/write queues */
2377 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2378 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2379 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2380 set->map[HCTX_TYPE_READ].nr_queues =
2381 ctrl->io_queues[HCTX_TYPE_READ];
2382 set->map[HCTX_TYPE_READ].queue_offset =
2383 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2384 } else {
2385 /* shared read/write queues */
2386 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2387 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2388 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2389 set->map[HCTX_TYPE_READ].nr_queues =
2390 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2391 set->map[HCTX_TYPE_READ].queue_offset = 0;
2392 }
2393 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2394 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2395
2396 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2397 /* map dedicated poll queues only if we have queues left */
2398 set->map[HCTX_TYPE_POLL].nr_queues =
2399 ctrl->io_queues[HCTX_TYPE_POLL];
2400 set->map[HCTX_TYPE_POLL].queue_offset =
2401 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2402 ctrl->io_queues[HCTX_TYPE_READ];
2403 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2404 }
2405
2406 dev_info(ctrl->ctrl.device,
2407 "mapped %d/%d/%d default/read/poll queues.\n",
2408 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2409 ctrl->io_queues[HCTX_TYPE_READ],
2410 ctrl->io_queues[HCTX_TYPE_POLL]);
2411
2412 return 0;
2413}
2414
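/*
 * Polling entry point for poll queues: bail out if the queue is not live,
 * busy-poll the socket when possible, reap received PDUs and report how many
 * completions were processed.
 */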
2415static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2416{
2417 struct nvme_tcp_queue *queue = hctx->driver_data;
2418 struct sock *sk = queue->sock->sk;
2419
Olivier Deprez157378f2022-04-04 15:47:50 +02002420 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2421 return 0;
2422
2423 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
David Brazdil0f672f62019-12-10 10:32:29 +00002424 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2425 sk_busy_loop(sk, true);
2426 nvme_tcp_try_recv(queue);
Olivier Deprez157378f2022-04-04 15:47:50 +02002427 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
David Brazdil0f672f62019-12-10 10:32:29 +00002428 return queue->nr_cqe;
2429}
2430
Olivier Deprez157378f2022-04-04 15:47:50 +02002431static const struct blk_mq_ops nvme_tcp_mq_ops = {
David Brazdil0f672f62019-12-10 10:32:29 +00002432 .queue_rq = nvme_tcp_queue_rq,
Olivier Deprez157378f2022-04-04 15:47:50 +02002433 .commit_rqs = nvme_tcp_commit_rqs,
David Brazdil0f672f62019-12-10 10:32:29 +00002434 .complete = nvme_complete_rq,
2435 .init_request = nvme_tcp_init_request,
2436 .exit_request = nvme_tcp_exit_request,
2437 .init_hctx = nvme_tcp_init_hctx,
2438 .timeout = nvme_tcp_timeout,
2439 .map_queues = nvme_tcp_map_queues,
2440 .poll = nvme_tcp_poll,
2441};
2442
Olivier Deprez157378f2022-04-04 15:47:50 +02002443static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
David Brazdil0f672f62019-12-10 10:32:29 +00002444 .queue_rq = nvme_tcp_queue_rq,
2445 .complete = nvme_complete_rq,
2446 .init_request = nvme_tcp_init_request,
2447 .exit_request = nvme_tcp_exit_request,
2448 .init_hctx = nvme_tcp_init_admin_hctx,
2449 .timeout = nvme_tcp_timeout,
2450};
2451
2452static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2453 .name = "tcp",
2454 .module = THIS_MODULE,
2455 .flags = NVME_F_FABRICS,
2456 .reg_read32 = nvmf_reg_read32,
2457 .reg_read64 = nvmf_reg_read64,
2458 .reg_write32 = nvmf_reg_write32,
2459 .free_ctrl = nvme_tcp_free_ctrl,
2460 .submit_async_event = nvme_tcp_submit_async_event,
2461 .delete_ctrl = nvme_tcp_delete_ctrl,
2462 .get_address = nvmf_get_address,
2463};
2464
2465static bool
2466nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2467{
2468 struct nvme_tcp_ctrl *ctrl;
2469 bool found = false;
2470
2471 mutex_lock(&nvme_tcp_ctrl_mutex);
2472 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2473 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2474 if (found)
2475 break;
2476 }
2477 mutex_unlock(&nvme_tcp_ctrl_mutex);
2478
2479 return found;
2480}
2481
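/*
 * Create and connect a new TCP controller from the parsed connect options.
 * The transport service id falls back to the NVMe/TCP discovery port when
 * none is given.  As a rough illustration only (flag spellings depend on the
 * installed nvme-cli version), an association handled here could be set up
 * from user space with something like:
 *
 *   nvme connect -t tcp -a 192.0.2.10 -s 4420 \
 *        -n nqn.2018-01.example:subsys \
 *        --nr-io-queues=4 --nr-write-queues=2 --nr-poll-queues=1
 */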
2482static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2483 struct nvmf_ctrl_options *opts)
2484{
2485 struct nvme_tcp_ctrl *ctrl;
2486 int ret;
2487
2488 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2489 if (!ctrl)
2490 return ERR_PTR(-ENOMEM);
2491
2492 INIT_LIST_HEAD(&ctrl->list);
2493 ctrl->ctrl.opts = opts;
2494 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2495 opts->nr_poll_queues + 1;
2496 ctrl->ctrl.sqsize = opts->queue_size - 1;
2497 ctrl->ctrl.kato = opts->kato;
2498
2499 INIT_DELAYED_WORK(&ctrl->connect_work,
2500 nvme_tcp_reconnect_ctrl_work);
2501 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2502 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2503
2504 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2505 opts->trsvcid =
2506 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2507 if (!opts->trsvcid) {
2508 ret = -ENOMEM;
2509 goto out_free_ctrl;
2510 }
2511 opts->mask |= NVMF_OPT_TRSVCID;
2512 }
2513
2514 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2515 opts->traddr, opts->trsvcid, &ctrl->addr);
2516 if (ret) {
2517 pr_err("malformed address passed: %s:%s\n",
2518 opts->traddr, opts->trsvcid);
2519 goto out_free_ctrl;
2520 }
2521
2522 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2523 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2524 opts->host_traddr, NULL, &ctrl->src_addr);
2525 if (ret) {
2526 pr_err("malformed src address passed: %s\n",
2527 opts->host_traddr);
2528 goto out_free_ctrl;
2529 }
2530 }
2531
2532 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2533 ret = -EALREADY;
2534 goto out_free_ctrl;
2535 }
2536
2537 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2538 GFP_KERNEL);
2539 if (!ctrl->queues) {
2540 ret = -ENOMEM;
2541 goto out_free_ctrl;
2542 }
2543
2544 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2545 if (ret)
2546 goto out_kfree_queues;
2547
2548 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2549 WARN_ON_ONCE(1);
2550 ret = -EINTR;
2551 goto out_uninit_ctrl;
2552 }
2553
2554 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2555 if (ret)
2556 goto out_uninit_ctrl;
2557
2558 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2559 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2560
David Brazdil0f672f62019-12-10 10:32:29 +00002561 mutex_lock(&nvme_tcp_ctrl_mutex);
2562 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2563 mutex_unlock(&nvme_tcp_ctrl_mutex);
2564
2565 return &ctrl->ctrl;
2566
2567out_uninit_ctrl:
2568 nvme_uninit_ctrl(&ctrl->ctrl);
2569 nvme_put_ctrl(&ctrl->ctrl);
David Brazdil0f672f62019-12-10 10:32:29 +00002570 if (ret > 0)
2571 ret = -EIO;
2572 return ERR_PTR(ret);
2573out_kfree_queues:
2574 kfree(ctrl->queues);
2575out_free_ctrl:
2576 kfree(ctrl);
2577 return ERR_PTR(ret);
2578}
2579
2580static struct nvmf_transport_ops nvme_tcp_transport = {
2581 .name = "tcp",
2582 .module = THIS_MODULE,
2583 .required_opts = NVMF_OPT_TRADDR,
2584 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2585 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2586 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2587 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2588 NVMF_OPT_TOS,
2589 .create_ctrl = nvme_tcp_create_ctrl,
2590};
2591
2592static int __init nvme_tcp_init_module(void)
2593{
2594 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2595 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2596 if (!nvme_tcp_wq)
2597 return -ENOMEM;
2598
2599 nvmf_register_transport(&nvme_tcp_transport);
2600 return 0;
2601}
2602
2603static void __exit nvme_tcp_cleanup_module(void)
2604{
2605 struct nvme_tcp_ctrl *ctrl;
2606
2607 nvmf_unregister_transport(&nvme_tcp_transport);
2608
2609 mutex_lock(&nvme_tcp_ctrl_mutex);
2610 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2611 nvme_delete_ctrl(&ctrl->ctrl);
2612 mutex_unlock(&nvme_tcp_ctrl_mutex);
2613 flush_workqueue(nvme_delete_wq);
2614
2615 destroy_workqueue(nvme_tcp_wq);
2616}
2617
2618module_init(nvme_tcp_init_module);
2619module_exit(nvme_tcp_cleanup_module);
2620
2621MODULE_LICENSE("GPL v2");