/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES,
};
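
/*
 * For illustration (a sketch, assuming lockdep_states.h currently lists
 * HARDIRQ and SOFTIRQ), the LOCKDEP_STATE() expansion above yields:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,
 *	LOCK_USED_IN_SOFTIRQ_READ,
 *	LOCK_ENABLED_SOFTIRQ,
 *	LOCK_ENABLED_SOFTIRQ_READ,
 *
 * followed by LOCK_USED, LOCK_USED_READ and LOCK_USAGE_STATES.
 */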

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
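
/*
 * A hedged example of how these masks decode a usage bit (assuming the
 * HARDIRQ state from lockdep_states.h): each state contributes four
 * consecutive enum values in the order USED_IN, USED_IN_READ, ENABLED,
 * ENABLED_READ, so bit 0 flags the _READ variant and bit 1 tells
 * ENABLED apart from USED_IN:
 *
 *	LOCK_ENABLED_HARDIRQ_READ & LOCK_USAGE_READ_MASK	-> non-zero
 *	LOCK_ENABLED_HARDIRQ_READ & LOCK_USAGE_DIR_MASK		-> non-zero
 *	LOCK_USED_IN_HARDIRQ & (LOCK_USAGE_READ_MASK |
 *				LOCK_USAGE_DIR_MASK)		-> 0
 */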

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
	__LOCKF(USED_READ)
};
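
/*
 * Each LOCKF_* value is simply the corresponding usage bit turned into a
 * bitmask via __LOCKF(), e.g. (a sketch, assuming the HARDIRQ state):
 *
 *	LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ)
 *	LOCKF_ENABLED_HARDIRQ_READ == (1 << LOCK_ENABLED_HARDIRQ_READ)
 */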

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
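
/*
 * The construct above (and the three below) builds a mask by re-including
 * lockdep_states.h with LOCKDEP_STATE() expanding to "<bit> |" and then
 * terminating the expression with 0. Assuming the HARDIRQ and SOFTIRQ
 * states, LOCKF_ENABLED_IRQ expands roughly to:
 *
 *	static const unsigned long LOCKF_ENABLED_IRQ =
 *		LOCKF_ENABLED_HARDIRQ |
 *		LOCKF_ENABLED_SOFTIRQ |
 *		0;
 */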

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the 32MB size limit required for the
 * kernel. With CONFIG_LOCKDEP we could exceed this limit and cause
 * system boot-up problems. So, reduce the static allocations for
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
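
/*
 * Rough sizing note: with MAX_LOCKDEP_CHAINS_BITS == 16 that is 65536
 * chains and 327680 chain hlock slots (or 32768 and 163840 with
 * CONFIG_LOCKDEP_SMALL), i.e. an average budget of five held locks
 * recorded per chain.
 */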

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
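
/*
 * Usage sketch (hypothetical call site): debug_atomic_read() sums the
 * per-CPU counters over all possible CPUs, so a reader could do e.g.
 *
 *	unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 *
 * without any locking; the result is only approximately consistent.
 */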

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif