// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);
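
/*
 * A rough sketch of the usual "happy path" ordering of the call_* states,
 * inferred from the numbered step comments on the handlers below; this is
 * a hedged reading aid only — retries and error paths loop back to earlier
 * states rather than following this straight line:
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_refresh
 *	-> call_refreshresult -> call_allocate -> call_encode -> call_bind
 *	-> call_bind_status -> call_connect -> call_connect_status
 *	-> call_transmit -> call_transmit_status -> call_status -> call_decode
 */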

static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
				    struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (atomic_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}

static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
}

static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = parent ? : clnt;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	atomic_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		atomic_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
	};
	char servername[48];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			snprintf(servername, sizeof(servername), "%s",
				 sun->sun_path);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
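
/*
 * A hedged usage sketch of rpc_create(): how a caller might fill in
 * rpc_create_args and create a client.  The program table, address
 * variable, and flag choices below are illustrative assumptions only,
 * not a prescription from this file:
 *
 *	struct rpc_create_args cargs = {
 *		.net		= net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&server_addr,
 *		.addrsize	= sizeof(server_addr),
 *		.servername	= "example-server",
 *		.program	= &example_program,	// hypothetical table
 *		.version	= 1,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&cargs);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */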

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 * client and server.
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);
	if (err)
		goto out_revert;

	synchronize_rcu();
	if (parent != clnt)
		rpc_release_client(parent);
	xprt_switch_put(oldxps);
	xprt_put(old);
	trace_rpc_clnt_replace_xprt(clnt);
	return 0;

out_revert:
	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_replace_xprt_err(clnt);
	return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);

static
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xps == NULL)
		return -EAGAIN;
	xprt_iter_init_listall(xpi, xps);
	xprt_switch_put(xps);
	return 0;
}

/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
		void *data)
{
	struct rpc_xprt_iter xpi;
	int ret;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
	if (ret)
		return ret;
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		if (!xprt)
			break;
		ret = fn(clnt, xprt, data);
		xprt_put(xprt);
		if (ret < 0)
			break;
	}
	xprt_iter_destroy(&xpi);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
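
/*
 * A hedged sketch of a callback suitable for the iterator above; the
 * function name and the use of the data pointer as a counter are
 * illustrative assumptions, not part of this file's API:
 *
 *	static int example_count_xprt(struct rpc_clnt *clnt,
 *				      struct rpc_xprt *xprt, void *data)
 *	{
 *		unsigned int *count = data;	// caller-supplied accumulator
 *
 *		(*count)++;
 *		return 0;			// < 0 would stop the iteration
 *	}
 *
 *	unsigned int count = 0;
 *	rpc_clnt_iterate_for_each_xprt(clnt, example_count_xprt, &count);
 */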

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;


	if (list_empty(&clnt->cl_tasks))
		return;

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	trace_rpc_clnt_killall(clnt);
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	might_sleep();

	trace_rpc_clnt_shutdown(clnt);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void rpc_free_client_work(struct work_struct *work)
{
	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

	trace_rpc_clnt_free(clnt);

	/* These might block on processes that might allocate memory,
	 * so they cannot be called in rpciod, so they are handled separately
	 * here.
	 */
	rpc_clnt_debugfs_unregister(clnt);
	rpc_free_clid(clnt);
	rpc_clnt_remove_pipedir(clnt);
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

	kfree(clnt);
	rpciod_down();
}
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	trace_rpc_clnt_release(clnt);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);

	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
	schedule_work(&clnt->cl_work);
	return parent;
}

/*
 * Free an RPC client
 */
static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	if (clnt->cl_auth == NULL)
		return rpc_free_client(clnt);

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	atomic_inc(&clnt->cl_count);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (atomic_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
	return NULL;
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (!atomic_dec_and_test(&clnt->cl_count))
			break;
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
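
/*
 * A brief reading note, hedged rather than authoritative: judging from the
 * two functions above, a caller holding the last reference normally tears a
 * client down with rpc_shutdown_client(), which signals and waits out the
 * tasks on cl_tasks before dropping the reference, while
 * rpc_release_client() alone only drops a reference and relies on any
 * outstanding tasks still holding their own.
 */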

/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
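
/*
 * A hedged usage sketch of binding a second program onto an existing
 * client's transport, in the spirit of the NFS ACL case mentioned in the
 * comment above.  The program table name is an illustrative assumption:
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &example_acl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */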

struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}

static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}

void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt) {
		task->tk_xprt = NULL;
		if (task->tk_client)
			rpc_task_release_xprt(task->tk_client, xprt);
		else
			xprt_put(xprt);
	}
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);
	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);
	}
}

static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}

static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}

static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt)
		return;
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}

static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{

	if (clnt != NULL) {
		rpc_task_set_transport(task, clnt);
		task->tk_client = clnt;
		atomic_inc(&clnt->cl_count);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (clnt->cl_softerr)
			task->tk_flags |= RPC_TASK_TIMEOUT;
		if (clnt->cl_noretranstimeo)
			task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
		if (atomic_read(&clnt->cl_swapper))
			task->tk_flags |= RPC_TASK_SWAPPER;
		/* Add to the client's list of all tasks */
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		task->tk_msg.rpc_cred = msg->rpc_cred;
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			get_cred(task->tk_msg.rpc_cred);
	}
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);

	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
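
/*
 * A hedged sketch of driving an RPC directly through rpc_run_task();
 * rpc_call_sync() below is essentially a thin wrapper around this same
 * pattern.  The procedure table entry and argument/result variables are
 * illustrative assumptions:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procinfo,	// hypothetical procedure
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &rpc_default_ops,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *
 *	if (!IS_ERR(task)) {
 *		int status = task->tk_status;	// result of the call
 *		rpc_put_task(task);
 *	}
 */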

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
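
/*
 * A hedged usage sketch; a NULL-procedure call like this mirrors what
 * rpc_ping()-style callers do, and the procinfo name is an illustrative
 * assumption:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_null_procinfo,	// hypothetical
 *	};
 *	int err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */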

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
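
/*
 * A hedged sketch of the completion hook an async caller typically
 * supplies via tk_ops; the struct and function names are illustrative
 * assumptions:
 *
 *	static void example_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		// runs once the call completes
 *		if (task->tk_status < 0)
 *			pr_debug("call failed: %d\n", task->tk_status);
 *	}
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done = example_call_done,
 *	};
 *
 *	rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &example_call_ops, NULL);
 */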

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	xprt_init_bc_request(req, task);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	/* Subtract one to force an extra word of buffer space for the
	 * payload's XDR pad to fall into the rcv_buf's tail iovec.
	 */
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
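
/*
 * A brief worked example of the arithmetic above, hedged: hdrsize counts
 * XDR words, so with an illustrative hdrsize of 2, an RPC_REPHDRSIZE of 7
 * words, and an au_ralign of 1 word, the page data would begin at
 * (2 + 7 + 1 - 1) << 2 = 36 bytes into the receive buffer.  The numbers
 * here are assumptions chosen only to show the word-to-byte conversion.
 */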

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;
	int err;

	err = __sock_create(net, sap->sa_family,
			SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create UDP socket (%d)\n", err);
		goto out;
	}

	switch (sap->sa_family) {
	case AF_INET:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		err = -EAFNOSUPPORT;
		goto out_release;
	}
	if (err < 0) {
		dprintk("RPC: can't bind UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_connect(sock, sap, salen, 0);
	if (err < 0) {
		dprintk("RPC: can't connect UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_getsockname(sock, buf);
	if (err < 0) {
		dprintk("RPC: getsockname failed (%d)\n", err);
		goto out_release;
	}

	err = 0;
	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);

out_release:
	sock_release(sock);
out:
	return err;
}

/*
 * Scraping a connected socket failed, so we don't have a usable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC: %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC: %s: succeeded\n", __func__);
	return 0;
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	size_t ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	unsigned int ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_num_slots(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_num_bc_slots);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}

static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	task->tk_rpc_status = rpc_status;
	rpc_exit(task, tk_status);
}

static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

static void call_retry_reserve(struct rpc_task *task);

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;
			return;
		}

		rpc_call_rpcerror(task, -EIO);
		return;
	}

	switch (status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	default:
		rpc_call_rpcerror(task, status);
	}
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		fallthrough;
	case -EKEYEXPIRED:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	}
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}

/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
	int status;

	task->tk_status = 0;
	task->tk_action = call_encode;

	if (req->rq_buffer)
		return;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
			   proc->p_arglen;
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			  max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	trace_rpc_buf_alloc(task, status);
	if (status == 0)
		return;
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
		return;
	}

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}
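
/*
 * A brief worked example, hedged: the sizes above are computed in XDR
 * words ("quads") and then shifted left by 2 to convert to bytes.  With
 * illustrative values of p_arglen = 10 words, au_cslack = 4 words, and
 * RPC_CALLHDRSIZE = 6 words, the call buffer would come out to
 * (6 + (4 << 1) + 10) << 2 = 96 bytes; every number here is an assumption
 * chosen only to show the arithmetic, not the real constant values.
 */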
1813
static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

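/*
 * rpc_xdr_encode() rebuilds rq_snd_buf and rq_rcv_buf from scratch on
 * every pass, so a retransmitted request is re-marshalled cleanly
 * (which is also what lets rpcsec_gss use a fresh sequence number).
 */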
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
		     req->rq_buffer,
		     req->rq_callsize);
	xdr_buf_init(&req->rq_rcv_buf,
		     req->rq_rbuffer,
		     req->rq_rcvsize);

	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	xdr_free_bvec(&req->rq_snd_buf);
	if (rpc_encode_header(task, &xdr))
		return;

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
		goto out;

	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	/* Add task to reply queue before transmission to avoid races */
	if (rpc_reply_expected(task))
		xprt_request_enqueue_receive(task);
	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}
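
/*
 * Note on the routing above: the task normally proceeds straight to
 * call_transmit, but an unbound transport detours through call_bind
 * and an unconnected one through call_connect, so both preconditions
 * hold by the time the request reaches the wire.
 */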

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		if (task->tk_rebind_retry == 0)
			break;
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
retry_timeout:
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
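
/*
 * The retry_timeout exits above deliberately loop back through
 * call_bind rather than failing outright; it is rpc_check_timeout()
 * that eventually turns persistent rpcbind trouble into -ETIMEDOUT
 * for soft tasks.
 */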

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
		return;
	}

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
		return;
	}
	if (!xprt_prepare_transmit(task))
		return;
	xprt_connect(task);
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
		goto out_next;
	}
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	task->tk_status = 0;
	switch (status) {
	case -ECONNREFUSED:
		/* A positive refusal suggests a rebind is needed. */
		if (RPC_IS_SOFTCONN(task))
			break;
		if (clnt->cl_autobind) {
			rpc_force_rebind(clnt);
			goto out_retry;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EPROTO:
		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
					    task->tk_rqstp->rq_connect_cookie);
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EAGAIN:
	case -ETIMEDOUT:
		goto out_retry;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto out_retry;
	}
	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_transmit;
	return;
out_retry:
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
		return;
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
			return;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}
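
/*
 * Note that call_transmit() only touches the wire while the request
 * still has RPC_TASK_NEED_XMIT set; a request that was already sent
 * as part of a queued transmission falls straight through to
 * call_transmit_status.
 */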

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
		return;
	}

	switch (task->tk_status) {
	default:
		break;
	case -EBADMSG:
		task->tk_status = 0;
		task->tk_action = call_encode;
		break;
	/*
	 * Special cases: if we've been waiting on the
	 * socket's write_space() callback, or if the
	 * socket just returned a connection error,
	 * then hold onto the transport lock.
	 */
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_action = call_transmit;
		task->tk_status = 0;
		break;
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
			return;
		}
		fallthrough;
	case -ECONNRESET:
	case -ECONNABORTED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		task->tk_action = call_bind;
		task->tk_status = 0;
		break;
	}
	rpc_check_timeout(task);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}

/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 * addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
			return;
		task->tk_status = 0;
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	switch (task->tk_status) {
	case 0:
		/* Success */
	case -ENETDOWN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -EADDRINUSE:
	case -ENOTCONN:
	case -EPIPE:
		break;
	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
	case -EBADSLT:
	case -EAGAIN:
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status;

	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	trace_rpc_call_status(task);
	task->tk_status = 0;
	switch (status) {
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPERM:
		if (RPC_IS_SOFTCONN(task))
			goto out_exit;
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -ETIMEDOUT:
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		break;
	case -EADDRINUSE:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EPIPE:
	case -EAGAIN:
		break;
	case -ENFILE:
	case -ENOBUFS:
	case -ENOMEM:
		rpc_delay(task, HZ>>2);
		break;
	case -EIO:
		/* shutdown or soft timeout */
		goto out_exit;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_program->name, -status);
		goto out_exit;
	}
	task->tk_action = call_encode;
	if (status != -ECONNRESET && status != -ECONNABORTED)
		rpc_check_timeout(task);
	return;
out_exit:
	rpc_call_rpcerror(task, status);
}

static bool
rpc_check_connected(const struct rpc_rqst *req)
{
	/* No allocated request or transport? return true */
	if (!req || !req->rq_xprt)
		return true;
	return xprt_connected(req->rq_xprt);
}

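/*
 * rpc_check_timeout() sorts timed-out tasks into three classes:
 * SOFTCONN tasks fail as soon as the transport is disconnected, soft
 * tasks fail with -ETIMEDOUT (or -EIO) once the major timeout has
 * expired, and hard tasks just log "not responding" and keep retrying.
 */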
static void
rpc_check_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (RPC_SIGNALLED(task)) {
		rpc_call_rpcerror(task, -ERESTARTSYS);
		return;
	}

	if (xprt_adjust_timeout(task->tk_rqstp) == 0)
		return;

	trace_rpc_timeout_status(task);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
		rpc_call_rpcerror(task, -ETIMEDOUT);
		return;
	}

	if (RPC_IS_SOFT(task)) {
		/*
		 * Once a "no retrans timeout" soft task (a.k.a. NFSv4) has
		 * been sent, it should time out only if the transport
		 * connection gets terminally broken.
		 */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
		    rpc_check_connected(task->tk_rqstp))
			return;

		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_call_rpcerror(task, -ETIMEDOUT);
		else
			__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;
	int err;

	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		return;
	}

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			pr_notice_ratelimited("%s: server %s OK\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Did we ever call xprt_complete_rqst()? If not, we should assume
	 * the message is incomplete.
	 */
	err = -EAGAIN;
	if (!req->rq_reply_bytes_recvd)
		goto out;

	/* Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();

	req->rq_rcv_buf.len = req->rq_private_buf.len;
	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	err = rpc_decode_header(task, &xdr);
out:
	switch (err) {
	case 0:
		task->tk_action = rpc_exit_task;
		task->tk_status = rpcauth_unwrap_resp(task, &xdr);
		return;
	case -EAGAIN:
		task->tk_status = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
						    req->rq_connect_cookie);
		task->tk_action = call_encode;
		rpc_check_timeout(task);
		break;
	case -EKEYREJECTED:
		task->tk_action = call_reserve;
		rpc_check_timeout(task);
		rpcauth_invalcred(task);
		/* Ensure we obtain a new XID if we retry! */
		xprt_release(task);
	}
}
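
/*
 * Note on the -EAGAIN path above: an incomplete or garbled reply sends
 * the task back through call_encode, so the request is retransmitted
 * (after a forced disconnect first when cl_discrtry is set).
 */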
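/*
 * The call header marshalled below follows RFC 5531, section 9: six
 * big-endian 32-bit words (xid, msg_type CALL, rpcvers 2, program,
 * version, procedure), followed by the credential and verifier that
 * rpcauth_marshcred() appends.
 */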
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	__be32 *p;
	int error;

	error = -EMSGSIZE;
	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
	if (!p)
		goto out_fail;
	*p++ = req->rq_xid;
	*p++ = rpc_call;
	*p++ = cpu_to_be32(RPC_VERSION);
	*p++ = cpu_to_be32(clnt->cl_prog);
	*p++ = cpu_to_be32(clnt->cl_vers);
	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);

	error = rpcauth_marshcred(task, xdr);
	if (error < 0)
		goto out_fail;
	return 0;
out_fail:
	trace_rpc_bad_callhdr(task);
	rpc_call_rpcerror(task, error);
	return error;
}

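/*
 * rpc_decode_header() walks the reply side of the same RFC 5531
 * layout: xid and msg_type REPLY, then either MSG_ACCEPTED (a verifier
 * plus an accept_stat such as SUCCESS or PROG_MISMATCH) or MSG_DENIED
 * (RPC_MISMATCH, or AUTH_ERROR with an auth_stat); each branch below
 * maps one of those arms onto an errno and a retry decision.
 */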
static noinline int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	int error;
	__be32 *p;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't, pointer subtraction in the NFS client may give
	 *   undefined results
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (!p)
		goto out_unparsable;
	p++;	/* skip XID */
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);
	if (error)
		goto out_verifier;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_success:
		return 0;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
		goto out_err;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
		goto out_err;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		error = -EIO;
		break;
	default:
		goto out_unparsable;
	}

out_garbage:
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
		return -EAGAIN;
	}
out_err:
	rpc_call_rpcerror(task, error);
	return error;

out_unparsable:
	trace_rpc__unparsable(task);
	error = -EIO;
	goto out_garbage;

out_verifier:
	trace_rpc_bad_verifier(task);
	goto out_garbage;

out_msg_denied:
	error = -EACCES;
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_auth_error:
		break;
	case rpc_mismatch:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	default:
		goto out_unparsable;
	}

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
			break;
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
		return -EAGAIN;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		break;
	default:
		goto out_unparsable;
	}
	goto out_err;
}

static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		const void *obj)
{
}

static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		void *obj)
{
	return 0;
}

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

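/*
 * rpc_ping() is a single NULL-procedure round trip. The soft flags
 * make it report failure quickly instead of retrying indefinitely,
 * and RPC_TASK_NULLCREDS keeps real credentials out of what is only
 * a connectivity probe.
 */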
static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;

	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			    RPC_TASK_NULLCREDS);
	return err;
}

static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			 RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);

struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};

/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @dummy: unused
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *dummy)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;

	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
				    &rpc_cb_add_xprt_call_ops, data);

	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);

/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) the caller of the test function must dereference the
 *      rpc_xprt_switch and the rpc_xprt.
 *   2) the test function must call rpc_xprt_switch_add_xprt, usually in
 *      the rpc_call_done routine.
 *
 * Upon success (a return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch.
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps: the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *        and test function call data
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	struct rpc_task *task;
	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out_err;
	}
	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		goto out_err;

	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
	xtest->add_xprt_test(clnt, xprt, xtest->data);

	xprt_put(xprt);
	xprt_switch_put(xps);

	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return 1;
out_err:
	xprt_put(xprt);
	xprt_switch_put(xps);
	pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n",
		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);

/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in @xprtargs and
 * adds it to @clnt.
 * If @setup is given, it is called to test and/or set up the
 * connection before the new transport is added.
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	rcu_read_unlock();

	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
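
/*
 * A typical caller (a sketch with hypothetical names, modeled on NFSv4
 * session trunking):
 *
 *	struct rpc_add_xprt_test xtest = {
 *		.add_xprt_test	= my_trunk_test,	(caller-supplied)
 *		.data		= my_data,
 *	};
 *	rpc_clnt_add_xprt(clnt, &xprtargs,
 *			  rpc_clnt_setup_test_and_add_xprt, &xtest);
 */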

struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};

static int
rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *data)
{
	struct connect_timeout_data *timeo = data;

	if (xprt->ops->set_connect_timeout)
		xprt->ops->set_connect_timeout(xprt,
				timeo->connect_timeout,
				timeo->reconnect_timeout);
	return 0;
}

void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);

void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);

void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	rcu_read_lock();
	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
				 xprt);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);

bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
				   const struct sockaddr *sap)
{
	struct rpc_xprt_switch *xps;
	bool ret;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	ret = rpc_xprt_switch_has_addr(xps, sap);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}

void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
#endif

#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}

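/*
 * cl_swapper is an activation count: the first activation enables
 * swap support on every transport in the switch, and the matching
 * final deactivation (below) disables it again.
 */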
int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	if (atomic_inc_return(&clnt->cl_swapper) == 1)
		return rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_activate_callback, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);

static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}

void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
		rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_deactivate_callback, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
#endif /* CONFIG_SUNRPC_SWAP */