// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Read list size */
        size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

        /* Minimal Reply chunk size */
        size += sizeof(__be32); /* segment count */
        size += rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC: %s: max call header size = %u\n",
                __func__, size);
        return size;
}
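
/* Editor's illustration, not kernel code: assuming the usual protocol
 * constants -- RPCRDMA_HDRLEN_MIN == 28 bytes, rpcrdma_readchunk_maxsz == 6
 * and rpcrdma_segment_maxsz == 4 XDR words (worth confirming against
 * rpcrdma.h and xprt_rdma.h) -- a transport with maxsegs == 8 reserves
 *
 *      28 + (8 * 6 * 4) + (4 + (4 * 4) + 4) = 244 bytes
 *
 * for the largest possible Call header.
 */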

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Write list size */
        size += sizeof(__be32); /* segment count */
        size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC: %s: max reply header size = %u\n",
                __func__, size);
        return size;
}
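
/* Under the same assumed constants as the example above, maxsegs == 8
 * gives 28 + 4 + (8 * 4 * 4) + 4 = 164 bytes for the largest possible
 * Reply header.
 */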

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
        unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;

        ep->rep_max_inline_send =
                ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
        ep->rep_max_inline_recv =
                ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
                                struct rpc_rqst *rqst)
{
        struct xdr_buf *xdr = &rqst->rq_snd_buf;
        unsigned int count, remaining, offset;

        if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
                return false;

        if (xdr->page_len) {
                remaining = xdr->page_len;
                offset = offset_in_page(xdr->page_base);
                count = RPCRDMA_MIN_SEND_SGES;
                while (remaining) {
                        remaining -= min_t(unsigned int,
                                           PAGE_SIZE - offset, remaining);
                        offset = 0;
                        if (++count > r_xprt->rx_ia.ri_max_send_sges)
                                return false;
                }
        }

        return true;
}
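
/* Worked example (illustrative; assumes 4KB pages and that
 * RPCRDMA_MIN_SEND_SGES == 3): a send buffer whose page list spans
 * five pages needs 3 + 5 = 8 SGEs, so on a device whose
 * ri_max_send_sges is less than 8 this request is forced into a Read
 * chunk even when xdr->len is under rep_max_inline_send.
 */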

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a Write list or a Reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
                                   struct rpc_rqst *rqst)
{
        return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
                          const struct rpc_rqst *rqst)
{
        const struct xdr_buf *buf = &rqst->rq_rcv_buf;

        return (buf->head[0].iov_len + buf->tail[0].iov_len) <
                r_xprt->rx_ep.rep_max_inline_recv;
}

/* The NFS ACL code likes to be lazy about allocating pages. For TCP,
 * these pages can be allocated during receive processing. Not true
 * for RDMA, which must always provision receive buffers
 * up front.
 */
static noinline int
rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
{
        struct page **ppages;
        int len;

        len = buf->page_len;
        ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
        while (len > 0) {
                if (!*ppages)
                        *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
                if (!*ppages)
                        return -ENOBUFS;
                ppages++;
                len -= PAGE_SIZE;
        }

        return 0;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
                     unsigned int *n)
{
        u32 remaining, page_offset;
        char *base;

        base = vec->iov_base;
        page_offset = offset_in_page(base);
        remaining = vec->iov_len;
        while (remaining) {
                seg->mr_page = NULL;
                seg->mr_offset = base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
                remaining -= seg->mr_len;
                base += seg->mr_len;
                ++seg;
                ++(*n);
                page_offset = 0;
        }
        return seg;
}
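
/* Example of the split (illustrative numbers): a 6000-byte kvec that
 * begins 100 bytes into a 4KB page becomes two segments, one of
 * 4096 - 100 = 3996 bytes and one of 2004 bytes, because each SGE is
 * clamped to the end of its page.
 */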

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
                     unsigned int pos, enum rpcrdma_chunktype type,
                     struct rpcrdma_mr_seg *seg)
{
        unsigned long page_base;
        unsigned int len, n;
        struct page **ppages;

        n = 0;
        if (pos == 0)
                seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = offset_in_page(xdrbuf->page_base);
        while (len) {
                seg->mr_page = *ppages;
                seg->mr_offset = (char *)page_base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                len -= seg->mr_len;
                ++ppages;
                ++seg;
                ++n;
                page_base = 0;
        }

        /* When encoding a Read chunk, the tail iovec contains an
         * XDR pad and may be omitted.
         */
        if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        /* When encoding a Write chunk, some servers need to see an
         * extra segment for non-XDR-aligned Write chunks. The upper
         * layer provides space in the tail iovec that may be used
         * for this purpose.
         */
        if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        if (xdrbuf->tail[0].iov_len)
                seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
        if (unlikely(n > RPCRDMA_MAX_SEGS))
                return -EIO;
        return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_one;
        return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_zero;
        return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
        *iptr++ = cpu_to_be32(mr->mr_handle);
        *iptr++ = cpu_to_be32(mr->mr_length);
        xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        xdr_encode_rdma_segment(p, mr);
        return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
                    u32 position)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 6 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p++ = xdr_one; /* Item present */
        *p++ = cpu_to_be32(position);
        xdr_encode_rdma_segment(p, mr);
        return 0;
}
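
/* Wire format recap (follows directly from the two encoders above):
 * a plain segment is four XDR words -- handle, length, and offset as
 * a 64-bit hyper -- while a read segment prefixes that with an
 * item-present discriminator and a 32-bit position:
 *
 *      1 | position | handle | length | offset-hi | offset-lo
 */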

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
                                                 struct rpcrdma_req *req,
                                                 struct rpcrdma_mr_seg *seg,
                                                 int nsegs, bool writing,
                                                 struct rpcrdma_mr **mr)
{
        *mr = rpcrdma_mr_pop(&req->rl_free_mrs);
        if (!*mr) {
                *mr = rpcrdma_mr_get(r_xprt);
                if (!*mr)
                        goto out_getmr_err;
                trace_xprtrdma_mr_get(req);
                (*mr)->mr_req = req;
        }

        rpcrdma_mr_push(*mr, &req->rl_registered);
        return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
        trace_xprtrdma_nomrs(req);
        xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        if (r_xprt->rx_ep.rep_connected != -ENODEV)
                schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
        return ERR_PTR(-EAGAIN);
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
                                    struct rpcrdma_req *req,
                                    struct rpc_rqst *rqst,
                                    enum rpcrdma_chunktype rtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        unsigned int pos;
        int nsegs;

        if (rtype == rpcrdma_noch)
                goto done;

        pos = rqst->rq_snd_buf.head[0].iov_len;
        if (rtype == rpcrdma_areadch)
                pos = 0;
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
                                     rtype, seg);
        if (nsegs < 0)
                return nsegs;

        do {
                seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);

                if (encode_read_segment(xdr, mr, pos) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
                r_xprt->rx_stats.read_chunk_count++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

done:
        return encode_item_not_present(xdr);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
                                     struct rpcrdma_req *req,
                                     struct rpc_rqst *rqst,
                                     enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        if (wtype != rpcrdma_writech)
                goto done;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
                                     rqst->rq_rcv_buf.head[0].iov_len,
                                     wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.write_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in this Write chunk */
        *segcount = cpu_to_be32(nchunks);

done:
        return encode_item_not_present(xdr);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
                                      struct rpcrdma_req *req,
                                      struct rpc_rqst *rqst,
                                      enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mr *mr;
        int nsegs, nchunks;
        __be32 *segcount;

        if (wtype != rpcrdma_replych)
                return encode_item_not_present(xdr);

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);

                if (encode_rdma_segment(xdr, mr) < 0)
                        return -EMSGSIZE;

                trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
                r_xprt->rx_stats.reply_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += mr->mr_length;
                nchunks++;
                nsegs -= mr->mr_nents;
        } while (nsegs);

        /* Update count of segments in the Reply chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
        struct rpcrdma_req *req =
                container_of(kref, struct rpcrdma_req, rl_kref);
        struct rpcrdma_rep *rep = req->rl_reply;

        rpcrdma_complete_rqst(rep);
        rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}
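
/* A note on rl_kref (an editor's reading of this function together
 * with rpcrdma_prepare_send_sges and rpcrdma_reply_handler below):
 * one reference is held while Send SGEs remain DMA-mapped and another
 * while Reply processing is outstanding. Whichever side drops the
 * count to zero invokes rpcrdma_complete_rqst, so the RPC cannot
 * complete while its Send buffers are still mapped.
 */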

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
        struct ib_sge *sge;

        if (!sc->sc_unmap_count)
                return;

        /* The first two SGEs contain the transport header and
         * the inline buffer. These are always left mapped so
         * they can be cheaply re-used.
         */
        for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
             ++sge, --sc->sc_unmap_count)
                ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
                                  DMA_TO_DEVICE);

        kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
                                    struct rpcrdma_req *req, u32 len)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
        struct ib_sge *sge = sc->sc_sges;

        if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
                goto out_regbuf;
        sge->addr = rdmab_addr(rb);
        sge->length = len;
        sge->lkey = rdmab_lkey(rb);

        ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
                                      DMA_TO_DEVICE);
        sc->sc_wr.num_sge++;
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
                                     struct rpcrdma_req *req,
                                     struct xdr_buf *xdr,
                                     enum rpcrdma_chunktype rtype)
{
        struct rpcrdma_sendctx *sc = req->rl_sendctx;
        unsigned int sge_no, page_base, len, remaining;
        struct rpcrdma_regbuf *rb = req->rl_sendbuf;
        struct ib_sge *sge = sc->sc_sges;
        struct page *page, **ppages;

        /* The head iovec is straightforward, as it is already
         * DMA-mapped. Sync the content that has changed.
         */
        if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
                goto out_regbuf;
        sc->sc_device = rdmab_device(rb);
        sge_no = 1;
        sge[sge_no].addr = rdmab_addr(rb);
        sge[sge_no].length = xdr->head[0].iov_len;
        sge[sge_no].lkey = rdmab_lkey(rb);
        ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
                                      sge[sge_no].length, DMA_TO_DEVICE);

        /* If there is a Read chunk, the page list is being handled
         * via explicit RDMA, and thus is skipped here. However, the
         * tail iovec may include an XDR pad for the page list, as
         * well as additional content, and may not reside in the
         * same page as the head iovec.
         */
        if (rtype == rpcrdma_readch) {
                len = xdr->tail[0].iov_len;

                /* Do not include the tail if it is only an XDR pad */
                if (len < 4)
                        goto out;

                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);

                /* If the content in the page list is an odd length,
                 * xdr_write_pages() has added a pad at the beginning
                 * of the tail iovec. Force the tail's non-pad content
                 * to land at the next XDR position in the Send message.
                 */
                page_base += len & 3;
                len -= len & 3;
                goto map_tail;
        }

        /* If there is a page list present, temporarily DMA map
         * and prepare an SGE for each page to be sent.
         */
        if (xdr->page_len) {
                ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
                page_base = offset_in_page(xdr->page_base);
                remaining = xdr->page_len;
                while (remaining) {
                        sge_no++;
                        if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
                                goto out_mapping_overflow;

                        len = min_t(u32, PAGE_SIZE - page_base, remaining);
                        sge[sge_no].addr =
                                ib_dma_map_page(rdmab_device(rb), *ppages,
                                                page_base, len, DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(rdmab_device(rb),
                                                 sge[sge_no].addr))
                                goto out_mapping_err;
                        sge[sge_no].length = len;
                        sge[sge_no].lkey = rdmab_lkey(rb);

                        sc->sc_unmap_count++;
                        ppages++;
                        remaining -= len;
                        page_base = 0;
                }
        }

        /* The tail iovec is not always constructed in the same
         * page where the head iovec resides (see, for example,
         * gss_wrap_req_priv). To neatly accommodate that case,
         * DMA map it separately.
         */
        if (xdr->tail[0].iov_len) {
                page = virt_to_page(xdr->tail[0].iov_base);
                page_base = offset_in_page(xdr->tail[0].iov_base);
                len = xdr->tail[0].iov_len;

map_tail:
                sge_no++;
                sge[sge_no].addr =
                        ib_dma_map_page(rdmab_device(rb), page, page_base, len,
                                        DMA_TO_DEVICE);
                if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
                        goto out_mapping_err;
                sge[sge_no].length = len;
                sge[sge_no].lkey = rdmab_lkey(rb);
                sc->sc_unmap_count++;
        }

out:
        sc->sc_wr.num_sge += sge_no;
        if (sc->sc_unmap_count)
                kref_get(&req->rl_kref);
        return true;

out_regbuf:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;

out_mapping_overflow:
        rpcrdma_sendctx_unmap(sc);
        pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
        return false;

out_mapping_err:
        rpcrdma_sendctx_unmap(sc);
        trace_xprtrdma_dma_maperr(sge[sge_no].addr);
        return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
                          struct rpcrdma_req *req, u32 hdrlen,
                          struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
        int ret;

        ret = -EAGAIN;
        req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
        if (!req->rl_sendctx)
                goto err;
        req->rl_sendctx->sc_wr.num_sge = 0;
        req->rl_sendctx->sc_unmap_count = 0;
        req->rl_sendctx->sc_req = req;
        kref_init(&req->rl_kref);

        ret = -EIO;
        if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
                goto err;
        if (rtype != rpcrdma_areadch)
                if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
                        goto err;
        return 0;

err:
        trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
        return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *      %0 if the RPC was sent successfully,
 *      %-ENOTCONN if the connection was lost,
 *      %-EAGAIN if the caller should call again with the same arguments,
 *      %-ENOBUFS if the caller should call again after a delay,
 *      %-EMSGSIZE if the transport header is too small,
 *      %-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct xdr_stream *xdr = &req->rl_stream;
        enum rpcrdma_chunktype rtype, wtype;
        bool ddp_allowed;
        __be32 *p;
        int ret;

        if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
                ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
                if (ret)
                        return ret;
        }

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
                        rqst);

        /* Fixed header fields */
        ret = -EMSGSIZE;
        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (!p)
                goto out_err;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

        /* When the ULP employs a GSS flavor that guarantees integrity
         * or privacy, direct data placement of individual data items
         * is not allowed.
         */
        ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
                        RPCAUTH_AUTH_DATATOUCH);

        /*
         * Chunks needed for results?
         *
         * o If the expected result is under the inline threshold, all ops
         *   return as inline.
         * o Large read ops return data as write chunk(s), header as
         *   inline.
         * o Large non-read ops return as a single reply chunk.
         */
        if (rpcrdma_results_inline(r_xprt, rqst))
                wtype = rpcrdma_noch;
        else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
                 rpcrdma_nonpayload_inline(r_xprt, rqst))
                wtype = rpcrdma_writech;
        else
                wtype = rpcrdma_replych;

        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         *
         * This assumes that the upper layer does not present a request
         * that both has a data payload, and whose non-data arguments
         * by themselves are larger than the inline threshold.
         */
        if (rpcrdma_args_inline(r_xprt, rqst)) {
                *p++ = rdma_msg;
                rtype = rpcrdma_noch;
        } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
                *p++ = rdma_msg;
                rtype = rpcrdma_readch;
        } else {
                r_xprt->rx_stats.nomsg_call_count++;
                *p++ = rdma_nomsg;
                rtype = rpcrdma_areadch;
        }

        /* If this is a retransmit, discard previously registered
         * chunks. Very likely the connection has been replaced,
         * so these registrations are invalid and unusable.
         */
        frwr_recycle(req);

        /* This implementation supports the following combinations
         * of chunk lists in one RPC-over-RDMA Call message:
         *
         *   - Read list
         *   - Write list
         *   - Reply chunk
         *   - Read list + Reply chunk
         *
         * It might not yet support the following combinations:
         *
         *   - Read list + Write list
         *
         * It does not support the following combinations:
         *
         *   - Write list + Reply chunk
         *   - Read list + Write list + Reply chunk
         *
         * This implementation supports only a single chunk in each
         * Read or Write list. Thus for example the client cannot
         * send a Call message with a Position Zero Read chunk and a
         * regular Read chunk at the same time.
         */
        ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
        if (ret)
                goto out_err;
        ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
        if (ret)
                goto out_err;
        ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
        if (ret)
                goto out_err;

        ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
                                        &rqst->rq_snd_buf, rtype);
        if (ret)
                goto out_err;

        trace_xprtrdma_marshal(req, rtype, wtype);
        return 0;

out_err:
        trace_xprtrdma_marshal_failed(rqst, ret);
        r_xprt->rx_stats.failed_marshal_count++;
        frwr_reset(req);
        return ret;
}
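
/* Worked example of the chunk decisions above (illustrative sizes and
 * operations, assuming a 1KB inline threshold): a small GETATTR fits
 * both directions, so rtype == wtype == rpcrdma_noch and the Call goes
 * out as a plain RDMA_MSG. A 100KB WRITE exceeds the send limit but is
 * marked XDRBUF_WRITE, so its payload moves via a Read chunk
 * (rtype == rpcrdma_readch). A 100KB READ reply is marked XDRBUF_READ,
 * so the client offers a Write chunk (wtype == rpcrdma_writech).
 * Oversized operations with no DDP-eligible payload fall back to
 * rdma_nomsg with a Position Zero Read chunk or a Reply chunk.
 */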

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        unsigned long fixup_copy_count;
        int i, npages, curlen;
        char *destp;
        struct page **ppages;
        int page_base;

        /* The head iovec is redirected to the RPC reply message
         * in the receive buffer, to avoid a memcopy.
         */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        rqst->rq_private_buf.head[0].iov_base = srcp;

        /* The contents of the receive buffer that follow
         * head.iov_len bytes are copied into the page list.
         */
        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len)
                curlen = copy_len;
        trace_xprtrdma_fixup(rqst, copy_len, curlen);
        srcp += curlen;
        copy_len -= curlen;

        ppages = rqst->rq_rcv_buf.pages +
                (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
        page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
        fixup_copy_count = 0;
        if (copy_len && rqst->rq_rcv_buf.page_len) {
                int pagelist_len;

                pagelist_len = rqst->rq_rcv_buf.page_len;
                if (pagelist_len > copy_len)
                        pagelist_len = copy_len;
                npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > pagelist_len)
                                curlen = pagelist_len;

                        trace_xprtrdma_fixup_pg(rqst, i, srcp,
                                                copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        fixup_copy_count += curlen;
                        pagelist_len -= curlen;
                        if (!pagelist_len)
                                break;
                        page_base = 0;
                }

                /* Implicit padding for the last segment in a Write
                 * chunk is inserted inline at the front of the tail
                 * iovec. The upper layer ignores the content of
                 * the pad. Simply ensure inline content in the tail
                 * that follows the Write chunk is properly aligned.
                 */
                if (pad)
                        srcp -= pad;
        }

        /* The tail iovec is redirected to the remaining data
         * in the receive buffer, to avoid a memcopy.
         */
        if (copy_len || pad) {
                rqst->rq_rcv_buf.tail[0].iov_base = srcp;
                rqst->rq_private_buf.tail[0].iov_base = srcp;
        }

        return fixup_copy_count;
}
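
/* Illustrative walk-through (made-up sizes): for a 5000-byte inline
 * reply received into a buffer with head.iov_len == 128 and
 * page_len == 4096, the head iovec is pointed at the first 128 bytes
 * (no copy), the next 4096 bytes are memcopied into the page list,
 * and the tail iovec is pointed at the final 776 bytes; only the 4096
 * copied bytes count toward the return value.
 */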

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        if (rep->rr_proc != rdma_msg)
                return false;

        /* Peek at stream contents without advancing. */
        p = xdr_inline_decode(xdr, 0);

        /* Chunk lists */
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* RPC header */
        if (*p++ != rep->rr_xid)
                return false;
        if (*p != cpu_to_be32(RPC_CALL))
                return false;

        /* Now that we are sure this is a backchannel call,
         * advance to the RPC header.
         */
        p = xdr_inline_decode(xdr, 3 * sizeof(*p));
        if (unlikely(!p))
                goto out_short;

        rpcrdma_bc_receive_call(r_xprt, rep);
        return true;

out_short:
        pr_warn("RPC/RDMA short backward direction call\n");
        return true;
}
#else   /* CONFIG_SUNRPC_BACKCHANNEL */
{
        return false;
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
        u32 handle;
        u64 offset;
        __be32 *p;

        p = xdr_inline_decode(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        handle = be32_to_cpup(p++);
        *length = be32_to_cpup(p++);
        xdr_decode_hyper(p, &offset);

        trace_xprtrdma_decode_seg(handle, *length, offset);
        return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
        u32 segcount, seglength;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        segcount = be32_to_cpup(p);
        while (segcount--) {
                if (decode_rdma_segment(xdr, &seglength))
                        return -EIO;
                *length += seglength;
        }

        return 0;
}
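
/* Decoding example (illustrative): the XDR words 2 | HLOO | HLOO --
 * a segment count of two followed by two handle/length/offset
 * segments of, say, 1024 and 512 bytes -- leave *length == 1536.
 * A bogus segment count that runs past the received header makes
 * decode_rdma_segment() fail with -EIO.
 */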

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;
        if (unlikely(*p != xdr_zero))
                return -EIO;
        return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
        u32 chunklen;
        bool first;
        __be32 *p;

        *length = 0;
        first = true;
        do {
                p = xdr_inline_decode(xdr, sizeof(*p));
                if (unlikely(!p))
                        return -EIO;
                if (*p == xdr_zero)
                        break;
                if (!first)
                        return -EIO;

                if (decode_write_chunk(xdr, &chunklen))
                        return -EIO;
                *length += chunklen;
                first = false;
        } while (true);
        return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        if (*p != xdr_zero)
                if (decode_write_chunk(xdr, length))
                        return -EIO;
        return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                   struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk, rpclen;
        char *base;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_MSG sanity checks */
        if (unlikely(replychunk))
                return -EIO;

        /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
        base = (char *)xdr_inline_decode(xdr, 0);
        rpclen = xdr_stream_remaining(xdr);
        r_xprt->rx_stats.fixup_copy_count +=
                rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

        r_xprt->rx_stats.total_rdma_reply += writelist;
        return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_NOMSG sanity checks */
        if (unlikely(writelist))
                return -EIO;
        if (unlikely(!replychunk))
                return -EIO;

        /* Reply chunk buffer already is the reply vector */
        r_xprt->rx_stats.total_rdma_reply += replychunk;
        return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                     struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        switch (*p) {
        case err_vers:
                p = xdr_inline_decode(xdr, 2 * sizeof(*p));
                if (!p)
                        break;
                dprintk("RPC: %s: server reports "
                        "version error (%u-%u), xid %08x\n", __func__,
                        be32_to_cpup(p), be32_to_cpu(*(p + 1)),
                        be32_to_cpu(rep->rr_xid));
                break;
        case err_chunk:
                dprintk("RPC: %s: server reports "
                        "header decoding error, xid %08x\n", __func__,
                        be32_to_cpu(rep->rr_xid));
                break;
        default:
                dprintk("RPC: %s: server reports "
                        "unrecognized error %d, xid %08x\n", __func__,
                        be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
        }

        return -EIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport's queue lock to
 * ensure the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpc_rqst *rqst = rep->rr_rqst;
        int status;

        switch (rep->rr_proc) {
        case rdma_msg:
                status = rpcrdma_decode_msg(r_xprt, rep, rqst);
                break;
        case rdma_nomsg:
                status = rpcrdma_decode_nomsg(r_xprt, rep);
                break;
        case rdma_error:
                status = rpcrdma_decode_error(r_xprt, rep, rqst);
                break;
        default:
                status = -EIO;
        }
        if (status < 0)
                goto out_badheader;

out:
        spin_lock(&xprt->queue_lock);
        xprt_complete_rqst(rqst->rq_task, status);
        xprt_unpin_rqst(rqst);
        spin_unlock(&xprt->queue_lock);
        return;

out_badheader:
        trace_xprtrdma_reply_hdr(rep);
        r_xprt->rx_stats.bad_reply_count++;
        rqst->rq_task->tk_status = status;
        status = 0;
        goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
        struct rpcrdma_req *req =
                container_of(kref, struct rpcrdma_req, rl_kref);

        rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        u32 credits;
        __be32 *p;

        /* Any data means we had a useful conversation, so
         * we don't need to delay the next reconnect.
         */
        if (xprt->reestablish_timeout)
                xprt->reestablish_timeout = 0;

        /* Fixed transport header fields */
        xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
                        rep->rr_hdrbuf.head[0].iov_base, NULL);
        p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
        if (unlikely(!p))
                goto out_shortreply;
        rep->rr_xid = *p++;
        rep->rr_vers = *p++;
        credits = be32_to_cpu(*p++);
        rep->rr_proc = *p++;

        if (rep->rr_vers != rpcrdma_version)
                goto out_badversion;

        if (rpcrdma_is_bcall(r_xprt, rep))
                return;

        /* Match incoming rpcrdma_rep to an rpcrdma_req to
         * get context for handling any incoming chunks.
         */
        spin_lock(&xprt->queue_lock);
        rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
        if (!rqst)
                goto out_norqst;
        xprt_pin_rqst(rqst);
        spin_unlock(&xprt->queue_lock);

        if (credits == 0)
                credits = 1;    /* don't deadlock */
        else if (credits > buf->rb_max_requests)
                credits = buf->rb_max_requests;
        if (buf->rb_credits != credits) {
                spin_lock(&xprt->transport_lock);
                buf->rb_credits = credits;
                xprt->cwnd = credits << RPC_CWNDSHIFT;
                spin_unlock(&xprt->transport_lock);
        }
        rpcrdma_post_recvs(r_xprt, false);

        req = rpcr_to_rdmar(rqst);
        if (req->rl_reply) {
                trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
                rpcrdma_recv_buffer_put(req->rl_reply);
        }
        req->rl_reply = rep;
        rep->rr_rqst = rqst;

        trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

        if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
                frwr_reminv(rep, &req->rl_registered);
        if (!list_empty(&req->rl_registered))
                frwr_unmap_async(r_xprt, req);
                /* LocalInv completion will complete the RPC */
        else
                kref_put(&req->rl_kref, rpcrdma_reply_done);
        return;

out_badversion:
        trace_xprtrdma_reply_vers(rep);
        goto out;

out_norqst:
        spin_unlock(&xprt->queue_lock);
        trace_xprtrdma_reply_rqst(rep);
        goto out;

out_shortreply:
        trace_xprtrdma_reply_short(rep);

out:
        rpcrdma_recv_buffer_put(rep);
}