/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/sched.h
 *
 * Scheduling primitives for kernel Sun RPC.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_

#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>

/*
 * This is the actual RPC procedure call info.
 */
struct rpc_procinfo;
struct rpc_message {
	const struct rpc_procinfo *rpc_proc;	/* Procedure information */
	void *			rpc_argp;	/* Arguments */
	void *			rpc_resp;	/* Result */
	const struct cred *	rpc_cred;	/* Credentials */
};
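
/*
 * Illustrative sketch (not part of this header): a caller typically points
 * rpc_proc at an entry in its program's procedure table and supplies
 * argument/result buffers before starting a task.  "my_procedures",
 * "MY_PROC", "args", "res" and "cred" below are placeholders.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &my_procedures[MY_PROC],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *		.rpc_cred	= cred,
 *	};
 */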

struct rpc_call_ops;
struct rpc_wait_queue;
struct rpc_wait {
	struct list_head	list;		/* wait queue links */
	struct list_head	links;		/* Links to related tasks */
	struct list_head	timer_list;	/* Timer list */
};

/*
 * This is the RPC task struct
 */
struct rpc_task {
	atomic_t		tk_count;	/* Reference count */
	int			tk_status;	/* result of last operation */
	struct list_head	tk_task;	/* global list of tasks */

	/*
	 * callback	to be executed after waking up
	 * action	next procedure for async tasks
	 */
	void			(*tk_callback)(struct rpc_task *);
	void			(*tk_action)(struct rpc_task *);

	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
	unsigned long		tk_runstate;	/* Task run status */

	struct rpc_wait_queue	*tk_waitqueue;	/* RPC wait queue we're on */
	union {
		struct work_struct	tk_work;	/* Async task work queue */
		struct rpc_wait		tk_wait;	/* RPC wait */
	} u;

	int			tk_rpc_status;	/* Result of last RPC operation */

	/*
	 * RPC call state
	 */
	struct rpc_message	tk_msg;		/* RPC call info */
	void *			tk_calldata;	/* Caller private data */
	const struct rpc_call_ops *tk_ops;	/* Caller callbacks */

	struct rpc_clnt *	tk_client;	/* RPC client */
	struct rpc_xprt *	tk_xprt;	/* Transport */
	struct rpc_cred *	tk_op_cred;	/* cred being operated on */

	struct rpc_rqst *	tk_rqstp;	/* RPC request */

	struct workqueue_struct	*tk_workqueue;	/* Normally rpciod, but could
						 * be any workqueue
						 */
	ktime_t			tk_start;	/* RPC task init timestamp */

	pid_t			tk_owner;	/* Process id for batching tasks */
	unsigned short		tk_flags;	/* misc flags */
	unsigned short		tk_timeouts;	/* maj timeouts */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
	unsigned short		tk_pid;		/* debugging aid */
#endif
	unsigned char		tk_priority : 2,/* Task priority */
				tk_garb_retry : 2,
				tk_cred_retry : 2,
				tk_rebind_retry : 2;
};

typedef void			(*rpc_action)(struct rpc_task *);

struct rpc_call_ops {
	void (*rpc_call_prepare)(struct rpc_task *, void *);
	void (*rpc_call_done)(struct rpc_task *, void *);
	void (*rpc_count_stats)(struct rpc_task *, void *);
	void (*rpc_release)(void *);
};
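
/*
 * Illustrative sketch (not part of this header): callers that need
 * completion handling pass an rpc_call_ops table; each hook receives the
 * task and the caller's private calldata pointer.  "my_call_done",
 * "my_release", "my_handle_error" and "struct my_data" are placeholders.
 *
 *	static void my_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct my_data *data = calldata;
 *
 *		if (task->tk_status < 0)
 *			my_handle_error(data, task->tk_status);
 *	}
 *
 *	static const struct rpc_call_ops my_call_ops = {
 *		.rpc_call_done	= my_call_done,
 *		.rpc_release	= my_release,
 *	};
 */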

struct rpc_task_setup {
	struct rpc_task *task;
	struct rpc_clnt *rpc_client;
	struct rpc_xprt *rpc_xprt;
	struct rpc_cred *rpc_op_cred;	/* credential being operated on */
	const struct rpc_message *rpc_message;
	const struct rpc_call_ops *callback_ops;
	void *callback_data;
	struct workqueue_struct *workqueue;
	unsigned short flags;
	signed char priority;
};
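
/*
 * Illustrative sketch (not part of this header): the usual pattern is to
 * fill an rpc_task_setup on the stack and hand it to rpc_run_task(), which
 * returns a referenced task (or an ERR_PTR on failure); rpc_put_task()
 * drops that reference.  "clnt", "msg", "my_call_ops" and "data" are
 * placeholders.
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &my_call_ops,
 *		.callback_data	= data,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_put_task(task);
 */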

/*
 * RPC task flags
 */
#define RPC_TASK_ASYNC		0x0001		/* is an async task */
#define RPC_TASK_SWAPPER	0x0002		/* is swapping in/out */
#define RPC_TASK_NULLCREDS	0x0010		/* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
#define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
#define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN	0x0100		/* send requests on "main" xprt */
#define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
#define RPC_TASK_SOFTCONN	0x0400		/* Fail if can't connect */
#define RPC_TASK_SENT		0x0800		/* message was sent */
#define RPC_TASK_TIMEOUT	0x1000		/* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT	0x2000		/* return ENOTCONN if not connected */
#define RPC_TASK_NO_RETRANS_TIMEOUT	0x4000	/* wait forever for a reply */
#define RPC_TASK_CRED_NOREF	0x8000		/* No refcount on the credential */

#define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_IS_SOFT(t)		((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t)	((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t)		((t)->tk_flags & RPC_TASK_SENT)

#define RPC_TASK_RUNNING	0
#define RPC_TASK_QUEUED		1
#define RPC_TASK_ACTIVE		2
#define RPC_TASK_NEED_XMIT	3
#define RPC_TASK_NEED_RECV	4
#define RPC_TASK_MSG_PIN_WAIT	5
#define RPC_TASK_SIGNALLED	6

#define RPC_IS_RUNNING(t)	test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t)	set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
				test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
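
/*
 * Note: clear_bit() does not imply a memory barrier, so the helpers below
 * bracket it with smp_mb__before_atomic()/smp_mb__after_atomic() to order
 * the RUNNING/QUEUED state change against surrounding accesses.
 */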
#define rpc_clear_running(t)	\
	do { \
		smp_mb__before_atomic(); \
		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
		smp_mb__after_atomic(); \
	} while (0)

#define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t)	\
	do { \
		smp_mb__before_atomic(); \
		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
		smp_mb__after_atomic(); \
	} while (0)

#define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)

#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)

/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 */
#define RPC_PRIORITY_LOW	(-1)
#define RPC_PRIORITY_NORMAL	(0)
#define RPC_PRIORITY_HIGH	(1)
#define RPC_PRIORITY_PRIVILEGED	(2)
#define RPC_NR_PRIORITY		(1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)

struct rpc_timer {
	struct list_head list;
	unsigned long expires;
	struct delayed_work dwork;
};

/*
 * RPC synchronization objects
 */
struct rpc_wait_queue {
	spinlock_t		lock;
	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
	unsigned char		priority;		/* current priority */
	unsigned char		nr;			/* # tasks remaining for cookie */
	unsigned short		qlen;			/* total # tasks waiting in queue */
	struct rpc_timer	timer_list;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
	const char *		name;
#endif
};

/*
 * This is the # requests to send consecutively
 * from a single cookie.  The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_IS_PRIORITY(q)	((q)->maxpriority > 0)

/*
 * Function prototypes
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void		rpc_put_task(struct rpc_task *);
void		rpc_put_task_async(struct rpc_task *);
void		rpc_signal_task(struct rpc_task *);
void		rpc_exit_task(struct rpc_task *);
void		rpc_exit(struct rpc_task *, int);
void		rpc_release_calldata(const struct rpc_call_ops *, void *);
void		rpc_killall_tasks(struct rpc_clnt *);
void		rpc_execute(struct rpc_task *);
void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_destroy_wait_queue(struct rpc_wait_queue *);
unsigned long	rpc_task_timeout(const struct rpc_task *task);
void		rpc_sleep_on_timeout(struct rpc_wait_queue *queue,
					struct rpc_task *task,
					rpc_action action,
					unsigned long timeout);
void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
					rpc_action action);
void		rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
					struct rpc_task *task,
					unsigned long timeout,
					int priority);
void		rpc_sleep_on_priority(struct rpc_wait_queue *,
					struct rpc_task *,
					int priority);
void		rpc_wake_up_queued_task(struct rpc_wait_queue *,
					struct rpc_task *);
void		rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
					struct rpc_task *,
					int);
void		rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
					struct rpc_wait_queue *,
					bool (*)(struct rpc_task *, void *),
					void *);
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
					bool (*)(struct rpc_task *, void *),
					void *);
void		rpc_wake_up_status(struct rpc_wait_queue *, int);
void		rpc_delay(struct rpc_task *, unsigned long);
int		rpc_malloc(struct rpc_task *);
void		rpc_free(struct rpc_task *);
int		rpciod_up(void);
void		rpciod_down(void);
int		__rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void		rpc_show_tasks(struct net *);
#endif
int		rpc_init_mempool(void);
void		rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
extern struct workqueue_struct *xprtiod_workqueue;
void		rpc_prepare_task(struct rpc_task *task);
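
/*
 * Illustrative sketch (not part of this header) of the wait queue usage
 * pattern: a task's tk_action step parks it with rpc_sleep_on() and
 * returns; whoever later makes the resource available wakes waiters with
 * rpc_wake_up() (or rpc_wake_up_next() for a single task).  "my_waitq",
 * "my_action" and "resource_ready" are placeholders.
 *
 *	static void my_action(struct rpc_task *task)
 *	{
 *		if (!resource_ready) {
 *			task->tk_action = my_action;	(rerun this step on wake-up)
 *			rpc_sleep_on(&my_waitq, task, NULL);
 *			return;
 *		}
 *		... carry on with the next step ...
 *	}
 *
 *	... at setup time ...
 *	rpc_init_wait_queue(&my_waitq, "my_waitq");
 *
 *	... when the resource becomes available ...
 *	rpc_wake_up(&my_waitq);
 */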

static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return __rpc_wait_for_completion_task(task, NULL);
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char *rpc_qname(const struct rpc_wait_queue *q)
{
	return ((q && q->name) ? q->name : "unknown");
}

static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
		const char *name)
{
	q->name = name;
}
#else
static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
		const char *name)
{
}
#endif

#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
#else
static inline int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	return -EINVAL;
}

static inline void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
}
#endif /* CONFIG_SUNRPC_SWAP */

#endif /* _LINUX_SUNRPC_SCHED_H_ */