/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * cacheline bouncing problems.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

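/*
 * Placement sketch (illustrative only, not from the original source):
 * per the layout note above, when an rwsem is embedded, other
 * frequently written fields of the containing structure can be kept
 * off the rwsem's cacheline, e.g. with ____cacheline_aligned_in_smp
 * from <linux/cache.h>. "struct foo" and its fields are hypothetical:
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *		unsigned long rarely_touched;
 *		atomic_long_t hot_counter ____cacheline_aligned_in_smp;
 *	};
 */
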
/*
 * Setting all bits of the owner field except bit 0 will indicate
 * that the rwsem is writer-owned with an unknown owner. The value
 * -2L has exactly that pattern: all bits set except bit 0.
 */
#define RWSEM_OWNER_UNKNOWN	(-2L)

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != 0;
}

#define RWSEM_UNLOCKED_VALUE		0L
#define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_RWSEMS
# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
#else
# define __DEBUG_RWSEM_INITIALIZER(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_INIT_COUNT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __DEBUG_RWSEM_INITIALIZER(name)			\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

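/*
 * Usage sketch (illustrative only; "cfg_rwsem" and "struct foo" are
 * hypothetical): a file-scope rwsem can be defined ready for use with
 * DECLARE_RWSEM(), while an rwsem embedded in a dynamically allocated
 * object must be set up with init_rwsem() before first use so it also
 * gets its own lockdep class key:
 *
 *	static DECLARE_RWSEM(cfg_rwsem);
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *	};
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (foo)
 *		init_rwsem(&foo->sem);
 */
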
/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type wants access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}

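/*
 * Usage sketch (illustrative only; names are hypothetical): a task
 * holding the lock across a long scan can periodically poll for
 * waiters and drop/retake the lock to let them in:
 *
 *	down_read(&map_rwsem);
 *	for (...each entry...) {
 *		if (rwsem_is_contended(&map_rwsem)) {
 *			up_read(&map_rwsem);
 *			cond_resched();
 *			down_read(&map_rwsem);
 *		}
 *		...
 *	}
 *	up_read(&map_rwsem);
 */
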
/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);

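/*
 * Usage sketch (illustrative only; names are hypothetical). Readers may
 * hold the lock concurrently; writers are exclusive. The killable
 * variants return 0 on success and -EINTR if a fatal signal arrives
 * while sleeping. A writer that has finished updating but still needs a
 * stable view can downgrade without an unlock/lock window:
 *
 *	if (down_write_killable(&tree_rwsem))
 *		return -EINTR;
 *	insert_node(&tree, node);
 *	downgrade_write(&tree_rwsem);
 *	validate_tree(&tree);
 *	up_read(&tree_rwsem);
 */
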
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
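
/*
 * Usage sketch (illustrative only; names are hypothetical): when two
 * rwsems of the same lock class are always taken in parent-then-child
 * order, tell lockdep by numbering the nesting levels:
 *
 *	down_write(&parent->sem);
 *	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 */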

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Take/release a lock when it is not the owner that will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */