// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void			__rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held in order to avoid races within
 * __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}
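
/*
 * Note: queue->timer_list.expires caches the absolute (jiffies) expiry of
 * the earliest pending timeout, while mod_delayed_work() takes a relative
 * delay; the subtraction above converts between the two, clamping already
 * expired deadlines to an immediate (0 jiffies) run of the work item.
 */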

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
				      &t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}
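
/*
 * Note on the encoding above: a task parked behind another task with the
 * same tk_owner is reachable only via u.tk_wait.links; its
 * u.tk_wait.list.prev is set to NULL as a sentinel (with .next caching the
 * queue head) so that __rpc_list_dequeue_task() below can distinguish the
 * two cases without scanning the queue.
 */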

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				     struct rpc_task,
				     u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
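
/*
 * Illustrative use (a sketch, not quoted from any one caller): a state
 * machine step that must wait on a resource typically sets the next
 * tk_action and parks the task, e.g.
 *
 *	task->tk_action = call_reserveresult;
 *	rpc_sleep_on(&xprt->backlog, task, NULL);
 *
 * where call_reserveresult stands in for whatever step should run next;
 * the task is later resumed by a wake-up such as
 * rpc_wake_up_next(&xprt->backlog) once the resource frees up.
 */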

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}
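
/*
 * To summarize the selection order above: tasks on the privileged queue
 * are always served first; otherwise up to queue->nr tasks are batched
 * from the current priority level before the scan falls through to the
 * round-robin loop, which steps down through the lower-priority queues,
 * wrapping back around to the highest, until it finds a non-empty queue
 * or arrives back at the level it started from.
 */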

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
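
/*
 * Typical (illustrative) use: back off before retrying a transiently
 * failing step, e.g.
 *
 *	rpc_delay(task, 3 * HZ);
 *	task->tk_action = call_retry_reserve;
 *
 * where call_retry_reserve stands in for the step to re-run; __rpc_atrun()
 * clears the -ETIMEDOUT status so the delay itself is not seen as an error.
 */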

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}
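
/*
 * The barrier above is there to order the setting of RPC_TASK_SIGNALLED
 * against the read of tk_waitqueue: a task concurrently going to sleep
 * should either be found on its wait queue here (and woken with
 * -ERESTARTSYS), or itself observe RPC_SIGNALLED() in __rpc_execute()
 * and exit.
 */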

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			trace_rpc_task_signalled(task, task->tk_action);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();
		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}
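
/*
 * Both the synchronous path above and rpc_async_schedule() below run the
 * state machine inside a memalloc_nofs_save()/restore() pair, so that
 * memory allocations made while executing an RPC (which may itself be
 * doing writeback) do not recurse into filesystem reclaim.
 */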

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_ASYNC(task))
		gfp = GFP_NOWAIT | __GFP_NOWARN;
	if (RPC_IS_SWAPPER(task))
		gfp |= __GFP_MEMALLOC;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
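
/*
 * Layout of the region returned by rpc_malloc() (a sketch; struct
 * rpc_buffer is defined in sunrpc.h):
 *
 *	+-------------------+---------------------+---------------------+
 *	| struct rpc_buffer | call (rq_callsize)  | reply (rq_rcvsize)  |
 *	+-------------------+---------------------+---------------------+
 *	                    ^ rq_buffer            ^ rq_rbuffer
 */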

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod workqueue.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}