Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _LINUX_RCUWAIT_H_ |
| 3 | #define _LINUX_RCUWAIT_H_ |
| 4 | |
| 5 | #include <linux/rcupdate.h> |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 6 | #include <linux/sched/signal.h> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7 | |
/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a user is blocked (or
 * checking if it needs to) on a condition, and reset as soon as we
 * know that the condition has succeeded and are awoken.
 */
struct rcuwait {
	/*
	 * The single blocked task, or NULL when nobody is waiting.
	 * Written under the caller's serialization via
	 * rcu_assign_pointer() (see prepare_to_rcuwait()/finish_rcuwait());
	 * read by wakers under RCU.
	 */
	struct task_struct __rcu *task;
};
| 19 | |
/* Static initializer for a struct rcuwait: no task is waiting. */
#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }
| 22 | |
| 23 | static inline void rcuwait_init(struct rcuwait *w) |
| 24 | { |
| 25 | w->task = NULL; |
| 26 | } |
| 27 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 28 | /* |
| 29 | * Note: this provides no serialization and, just as with waitqueues, |
| 30 | * requires care to estimate as to whether or not the wait is active. |
| 31 | */ |
| 32 | static inline int rcuwait_active(struct rcuwait *w) |
| 33 | { |
| 34 | return !!rcu_access_pointer(w->task); |
| 35 | } |
| 36 | |
| 37 | extern int rcuwait_wake_up(struct rcuwait *w); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 38 | |
/*
 * The caller is responsible for locking around rcuwait_wait_event(),
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 44 | |
/*
 * Publish @current as the waiter on @w.  rcu_assign_pointer() orders
 * the store so that a concurrent rcuwait_wake_up() dereferencing
 * ->task under RCU sees a valid task pointer.  The caller must hold
 * whatever lock serializes writes to ->task (see comment above
 * rcuwait_wake_up()'s declaration).
 */
static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}
| 49 | |
/*
 * Undo prepare_to_rcuwait(): stop being a wake-up target, then restore
 * the task to TASK_RUNNING.  Same serialization rules as
 * prepare_to_rcuwait() apply to the ->task write.
 */
static inline void finish_rcuwait(struct rcuwait *w)
{
	/* Clear the waiter first so no further wakeups target us. */
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
| 55 | |
/*
 * rcuwait_wait_event - block on @w until @condition is true
 * @w:		the rcuwait to block on
 * @condition:	expression re-evaluated after every wakeup
 * @state:	task state to sleep in (e.g. TASK_INTERRUPTIBLE)
 *
 * Evaluates to 0 once @condition held, or -EINTR if @state admits
 * signals and one became pending before @condition was satisfied.
 * The caller provides the locking described above (writes to ->task
 * must be serialized).  Note the condition is checked after
 * set_current_state(), so a wakeup between the check and schedule()
 * is not lost.
 */
#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
| 79 | |
| 80 | #endif /* _LINUX_RCUWAIT_H_ */ |