// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
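
/*
 * Example (illustrative sketch, not part of the kernel sources): an
 * open-coded waiter built from the primitives above. The wait queue `wq`
 * and the condition `data_ready` are hypothetical stand-ins:
 *
 *	DEFINE_WAIT_FUNC(wait, default_wake_function);
 *
 *	add_wait_queue(&wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (data_ready)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq, &wait);
 *
 * Setting the task state before testing the condition is what closes the
 * race against a concurrent wakeup; see the prepare_to_wait() comment below.
 */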

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and up to nr_exclusive
 * exclusive tasks.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
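
/*
 * Example (illustrative sketch): the matching waker side for the waiter
 * sketched near the top of this file. `wq` and `data_ready` are
 * hypothetical; wake_up() from <linux/wait.h> expands to
 * __wake_up(x, TASK_NORMAL, 1, NULL), and the wq->lock acquire/release in
 * the wakeup path orders the condition store against the waiter's check:
 *
 *	data_ready = true;
 *	wake_up(&wq);
 */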

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
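
/*
 * Example (illustrative sketch): a producer that is about to sleep itself,
 * e.g. a socket data-ready path, can use the sync variant through the
 * wake_up_interruptible_sync_poll() wrapper; `wq` is hypothetical:
 *
 *	wake_up_interruptible_sync_poll(&wq, EPOLLIN);
 *	// == __wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN))
 */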
193
Olivier Deprez157378f2022-04-04 15:47:50 +0200194/**
195 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
196 * @wq_head: the waitqueue
197 * @mode: which threads
198 * @key: opaque value to be passed to wakeup targets
199 *
200 * The sync wakeup differs in that the waker knows that it will schedule
201 * away soon, so while the target thread will be woken up, it will not
202 * be migrated to another CPU - ie. the two threads are 'synchronized'
203 * with each other. This can prevent needless bouncing between CPUs.
204 *
205 * On UP it can prevent extra preemption.
206 *
207 * If this function wakes up a task, it executes a full memory barrier before
208 * accessing the task state.
209 */
210void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
211 unsigned int mode, void *key)
212{
213 __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
214}
215EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

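/*
 * Wake up all waiters with EPOLLHUP | POLLFREE as the poll key. POLLFREE
 * signals poll-aware wake functions that the wait queue head itself is about
 * to be freed, so each waiter must unhook its entry now instead of touching
 * the queue later; the WARN_ON_ONCE() below checks that they all did.
 */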
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
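
/*
 * Example (illustrative sketch): the classic wait loop these helpers are
 * built for. `wq` and `condition` are hypothetical stand-ins; DEFINE_WAIT()
 * pairs the entry with autoremove_wake_function():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */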

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
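
/*
 * Sketch (simplified, not the exact macro text): the wait_event*() family
 * in <linux/wait.h> reduces to a loop around prepare_to_wait_event().
 * `wq` and `condition` are hypothetical stand-ins:
 *
 *	long err;
 *	struct wait_queue_entry wq_entry;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		err = prepare_to_wait_event(&wq, &wq_entry, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (err)	// -ERESTARTSYS: a signal is pending
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wq_entry);
 */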

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
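
/*
 * Sketch (simplified): the wait_event_interruptible_locked() family drives
 * these helpers with wq.lock already held by the caller. `wq`, `wait` and
 * `condition` are hypothetical stand-ins:
 *
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);	// may drop and retake wq.lock
 *		if (err)			// -ERESTARTSYS on signal
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);	// lock held, so no finish_wait()
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */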

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPUs that we haven't seen yet (and that might
	 *   still change the stack area)
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
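
/*
 * Example (illustrative sketch): a receive path waiting with a timeout,
 * the pattern wait_woken() exists for. `wq` and `data_ready` are
 * hypothetical stand-ins:
 *
 *	long timeout = msecs_to_jiffies(5000);
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq, &wait);
 *	while (!data_ready && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq, &wait);
 */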

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);