Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* |
| 2 | * kernel/mutex-debug.c |
| 3 | * |
| 4 | * Debugging code for mutexes |
| 5 | * |
| 6 | * Started by Ingo Molnar: |
| 7 | * |
| 8 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| 9 | * |
| 10 | * lock debugging, locking tree, deadlock detection started by: |
| 11 | * |
| 12 | * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey |
| 13 | * Released under the General Public License (GPL). |
| 14 | */ |
| 15 | #include <linux/mutex.h> |
| 16 | #include <linux/delay.h> |
| 17 | #include <linux/export.h> |
| 18 | #include <linux/poison.h> |
| 19 | #include <linux/sched.h> |
| 20 | #include <linux/spinlock.h> |
| 21 | #include <linux/kallsyms.h> |
| 22 | #include <linux/interrupt.h> |
| 23 | #include <linux/debug_locks.h> |
| 24 | |
| 25 | #include "mutex-debug.h" |
| 26 | |
| 27 | /* |
| 28 | * Must be called with lock->wait_lock held. |
| 29 | */ |
| 30 | void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) |
| 31 | { |
| 32 | memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); |
| 33 | waiter->magic = waiter; |
| 34 | INIT_LIST_HEAD(&waiter->list); |
| 35 | } |
| 36 | |
| 37 | void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) |
| 38 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 39 | lockdep_assert_held(&lock->wait_lock); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 40 | DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); |
| 41 | DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); |
| 42 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); |
| 43 | } |
| 44 | |
/* Release-time check: a waiter must be off the wait list before it dies. */
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	/* Poison the memory so use-after-free of the waiter is detectable. */
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
| 50 | |
/*
 * Record that @task is about to block on @lock.
 * Must be called with lock->wait_lock held (asserted below).
 */
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct task_struct *task)
{
	lockdep_assert_held(&lock->wait_lock);

	/* Mark the current thread as blocked on the lock: */
	task->blocked_on = waiter;
}
| 59 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 60 | void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 61 | struct task_struct *task) |
| 62 | { |
| 63 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); |
| 64 | DEBUG_LOCKS_WARN_ON(waiter->task != task); |
| 65 | DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); |
| 66 | task->blocked_on = NULL; |
| 67 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 68 | INIT_LIST_HEAD(&waiter->list); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 69 | waiter->task = NULL; |
| 70 | } |
| 71 | |
| 72 | void debug_mutex_unlock(struct mutex *lock) |
| 73 | { |
| 74 | if (likely(debug_locks)) { |
| 75 | DEBUG_LOCKS_WARN_ON(lock->magic != lock); |
| 76 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); |
| 77 | } |
| 78 | } |
| 79 | |
/*
 * Debug-mode initialization of @lock: registers the lockdep class
 * (when CONFIG_DEBUG_LOCK_ALLOC is set) and arms the magic check.
 */
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
	/* Self-referencing magic; debug_mutex_unlock() checks it later. */
	lock->magic = lock;
}
| 92 | |
/**
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	/* Clearing magic makes later use trip the debug checks. */
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);