/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
	struct rcu_sync		rss;
	unsigned int __percpu	*read_count;
	struct rcuwait		writer;
	wait_queue_head_t	waiters;
	atomic_t		block;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),		\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}

#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
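
/*
 * Usage example (an illustrative sketch, not part of this header; the
 * "foo" names are hypothetical): a statically defined per-CPU rwsem
 * favours many frequent readers over a rare, expensive writer.
 *
 *	DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);
 *
 *	void foo_read_path(void)
 *	{
 *		percpu_down_read(&foo_rwsem);
 *		... fast, frequently taken read-side section ...
 *		percpu_up_read(&foo_rwsem);
 *	}
 *
 *	void foo_switch_mode(void)
 *	{
 *		percpu_down_write(&foo_rwsem);
 *		... rare write-side section; waits for all readers ...
 *		percpu_up_write(&foo_rwsem);
 *	}
 */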

extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_rcu() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}

static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if they see our decrement (presumably to
		 * aggregate zero, as that is the only time it matters) they
		 * will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
			       const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})

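/*
 * Usage example (an illustrative sketch; "struct foo" is hypothetical):
 * dynamically initialized semaphores must be paired with
 * percpu_free_rwsem(), since the per-CPU read_count is allocated by
 * __percpu_init_rwsem().
 *
 *	struct foo {
 *		struct percpu_rw_semaphore sem;
 *	};
 *
 *	int foo_setup(struct foo *f)
 *	{
 *		return percpu_init_rwsem(&f->sem);
 *	}
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		percpu_free_rwsem(&f->sem);
 *	}
 */
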
#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
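
/*
 * Note: percpu_rwsem_release()/percpu_rwsem_acquire() only update lockdep
 * ownership; the semaphore itself stays held. A plausible use (sketch,
 * assuming the lock must be handed off across contexts, e.g. acquired in
 * one task and released from another):
 *
 *	percpu_down_write(&sem);
 *	percpu_rwsem_release(&sem, false, _RET_IP_);	release lockdep ownership
 *	...						another context takes over
 *	percpu_rwsem_acquire(&sem, false, _RET_IP_);	re-assert ownership there
 *	percpu_up_write(&sem);
 */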

#endif