/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with the
 * single-depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

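/*
 * Illustrative sketch (not part of this header): per the key comment above,
 * a dynamically allocated lock key must be registered before first use and
 * unregistered before its memory is freed. The "foo" structure, its lock and
 * the helpers below are hypothetical:
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		struct lock_class_key	key;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		lockdep_register_key(&f->key);
 *		lockdep_set_class(&f->lock, &f->key);
 *	}
 *
 *	void foo_destroy(struct foo *f)
 *	{
 *		lockdep_unregister_key(&f->key);
 *		kfree(f);
 *	}
 */
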
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

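/*
 * Illustrative sketch (assumed usage, not part of this header): when the
 * validator conflates two logically different locks that share one init
 * site (a false class-split, as described above), a dedicated static key
 * splits the class. All names below are hypothetical:
 *
 *	static struct lock_class_key bar_nested_key;
 *
 *	void bar_init(struct bar *b)
 *	{
 *		mutex_init(&b->lock);
 *		if (b->is_nested)
 *			lockdep_set_class_and_name(&b->lock, &bar_nested_key,
 *						   "bar->lock#nested");
 *	}
 */
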
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

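/*
 * Illustrative sketch (not part of this header): a custom synchronization
 * primitive can report its events to the validator directly, using the
 * "read"/"check" values documented above. "my_sem", its dep_map and the
 * __my_sem_*() helpers are hypothetical:
 *
 *	void my_sem_down_read(struct my_sem *sem)
 *	{
 *		// recursive read acquire (read=2), full validation (check=1)
 *		lock_acquire(&sem->dep_map, 0, 0, 2, 1, NULL, _RET_IP_);
 *		__my_sem_down_read(sem);
 *	}
 *
 *	void my_sem_up_read(struct my_sem *sem)
 *	{
 *		lock_release(&sem->dep_map, 0, _RET_IP_);
 *		__my_sem_up_read(sem);
 *	}
 */
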
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

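/*
 * Illustrative sketch (not part of this header): pinning lets code that
 * temporarily hands a held lock to a callee assert that the callee did not
 * drop it behind its back; the cookie returned by lockdep_pin_lock() must be
 * handed back when unpinning. "q->lock" and the callback are hypothetical:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&q->lock);
 *
 *	do_callback(q);			// must keep q->lock held
 *
 *	lockdep_unpin_lock(&q->lock, cookie);
 */
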
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

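/*
 * Illustrative sketch (not part of this header): a function that documents a
 * locking requirement in a comment can also enforce it at runtime with the
 * assertions above. "my_dev" and its members are hypothetical:
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		// caller must hold dev->mutex
 *		lockdep_assert_held(&dev->mutex);
 *
 *		dev->state = MY_DEV_STATE_READY;
 *	}
 */
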
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)		lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)		lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

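/*
 * Illustrative sketch (not part of this header): a file-scope lockdep_map
 * can be initialized with the macro above to annotate a construct that is
 * not itself a real lock. The map and key names are hypothetical:
 *
 *	static struct lock_class_key my_flush_key;
 *	static struct lockdep_map my_flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_flush", &my_flush_key);
 */
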
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

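/*
 * Illustrative sketch (not part of this header): a lock implementation wraps
 * its trylock fast path and its blocking slow path with LOCK_CONTENDED() so
 * that CONFIG_LOCK_STAT can attribute wait time to the contended case. The
 * "my_mutex" type and the __my_mutex_*() helpers are hypothetical:
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, __my_mutex_trylock, __my_mutex_slowlock);
 *	}
 */
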
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

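/*
 * Illustrative sketch (not part of this header): when two instances of the
 * same lock class are legitimately held at once (e.g. moving entries between
 * two hypothetical "foo" objects), the inner acquisition is annotated with
 * SINGLE_DEPTH_NESTING so the validator does not report a false self-deadlock:
 *
 *	void foo_move(struct foo *dst, struct foo *src)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		list_splice_init(&src->items, &dst->items);
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 */
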
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

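/*
 * Illustrative sketch (not part of this header): lock_map_acquire() and
 * lock_map_release() annotate constructs that behave like locks without
 * being locks, so that waiting on them participates in deadlock detection
 * (flush_work()-style). The map (initialized elsewhere, e.g. with
 * STATIC_LOCKDEP_MAP_INIT()) and the wait helper are hypothetical:
 *
 *	void my_flush(void)
 *	{
 *		lock_map_acquire(&my_flush_map);
 *		lock_map_release(&my_flush_map);
 *
 *		wait_for_my_work_to_finish();
 *	}
 */
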
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) \
do { \
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)

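/*
 * Illustrative sketch (not part of this header): might_lock() documents a
 * "this function may take the lock" path (for example a fast path that only
 * locks on contention), so the dependency is recorded even in runs where the
 * lock is never actually taken. "my_obj" and its helpers are hypothetical:
 *
 *	int my_get_value(struct my_obj *obj)
 *	{
 *		int val;
 *
 *		might_lock(&obj->lock);
 *
 *		if (my_read_fast(obj, &val))
 *			return val;
 *		return my_read_slow(obj);	// takes obj->lock
 *	}
 */
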
#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#define lockdep_assert_in_irq() do {					\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirq_context,			\
			  "Not in hardirq as expected\n");		\
	} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */