/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/* statistics for svc_pool structures */
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};

/*
 *
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued. */
#define SP_CONGESTED		(1)
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

struct svc_serv;

struct svc_serv_ops {
	/* Callback to use when last thread exits. */
	void		(*svo_shutdown)(struct svc_serv *, struct net *);

	/* function for service threads to run */
	int		(*svo_function)(void *);

	/* queue up a transport for servicing */
	void		(*svo_enqueue_xprt)(struct svc_xprt *);

	/* set up thread (or whatever) execution context */
	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);

	/* optional module to count when adding threads (pooled svcs only) */
	struct module	*svo_module;
};

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	const struct svc_serv_ops *sv_ops;	/* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}

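/*
 * Example (sketch, not part of this header): a caller adjusting the
 * thread count while holding its "service mutex" pins the serv first
 * and lets svc_destroy() drop that reference afterwards.  The mutex
 * name and error handling here are illustrative only.
 *
 *	mutex_lock(&my_service_mutex);
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);
 *	mutex_unlock(&my_service_mutex);
 */
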
/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)

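/*
 * Worked example: with the 1MB RPCSVC_MAXPAYLOAD above and a 4K
 * PAGE_SIZE, RPCSVC_MAXPAGES works out to 1048576/4096 + 2 + 1 = 259
 * pages per thread; the rounding term only matters when the payload
 * is not already a multiple of PAGE_SIZE.
 */
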
/* Consume and return the next 32-bit word from @iov, converted to host order. */
static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

/* Append @val to @iov in network byte order. */
static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

/* Consume and return the next 32-bit word from @iov without byte-swapping. */
static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

/* Push the most recently consumed 32-bit word back onto @iov. */
static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

/* Append an already network-order word @val to @iov. */
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}

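/*
 * Example (sketch): pulling two words off the head of the request
 * buffer by hand; real decoders normally use the generated XDR
 * routines instead.
 *
 *	struct kvec *argv = &rqstp->rq_arg.head[0];
 *	u32 arg_host   = svc_getnl(argv);	(host order)
 *	__be32 arg_raw = svc_getu32(argv);	(still network order)
 */
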
/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 * - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current reply */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
	unsigned long		rq_flags;	/* flags field */
	ktime_t			rq_qtime;	/* enqueue time */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */
	ktime_t			rq_stime;	/* start time */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
};

#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}

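/*
 * Example (sketch): branching on the peer's address family with the
 * helpers above; the variable names are illustrative only.
 *
 *	unsigned short port = 0;
 *
 *	switch (svc_addr(rqstp)->sa_family) {
 *	case AF_INET:
 *		port = ntohs(svc_addr_in(rqstp)->sin_port);
 *		break;
 *	case AF_INET6:
 *		port = ntohs(svc_addr_in6(rqstp)->sin6_port);
 *		break;
 *	}
 */
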
/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned int		*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	int			(*pc_decode)(struct svc_rqst *, __be32 *data);
	/* XDR encode result: */
	int			(*pc_encode)(struct svc_rqst *, __be32 *data);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,		/* choose one of the others */
	SVC_POOL_GLOBAL,		/* no mapping, just a single global pool
					 * (legacy & UP mode) */
	SVC_POOL_PERCPU,		/* one pool per cpu */
	SVC_POOL_PERNODE		/* one pool per numa node */
};

struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

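/*
 * Example (sketch): on a machine with four online CPUs in
 * SVC_POOL_PERCPU mode, the map would come out as npools == 4 with
 * to_pool[cpu] == cpu and pool_to[pool] == pool, so a pool can be
 * picked for the current CPU with a single array lookup.
 * (Illustrative values only.)
 */
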
extern struct svc_pool_map svc_pool_map;

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
					struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool, int node);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
unsigned int	   svc_pool_map_get(void);
void		   svc_pool_map_put(void);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			const struct svc_serv_ops *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
void		   svc_shutdown_net(struct svc_serv *, struct net *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
					const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int	   svc_fill_write_vector(struct svc_rqst *rqstp,
					 struct page **pages,
					 struct kvec *first, size_t total);
char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
					     struct kvec *first, void *p,
					     size_t total);

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet.  This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

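/*
 * Example (sketch): once a server knows its reply will need at most
 * resp_len bytes, it can shrink the outq reservation while keeping
 * room for a trailing krb5i/krb5p checksum.  resp_len is illustrative.
 *
 *	svc_reserve_auth(rqstp, resp_len);
 */
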
#endif /* SUNRPC_SVC_H */