/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

/*
 * Per-instance state for Tiny SRCU.  Deliberately small: one nesting
 * counter per reader slot, flags describing the grace-period (GP)
 * workqueue, a wait queue for the last reader to wake the GP, and a
 * singly linked tail-pointer list of pending callbacks.
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	short srcu_idx;			/* Current reader array element. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* Lockdep identity for this srcu_struct. */
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
30
/* Workqueue handler that drives Tiny SRCU grace periods (see srcu_work above). */
void srcu_drive_gp(struct work_struct *wp);

/*
 * Compile-time initializer for a Tiny SRCU structure.  The second argument
 * is unused here; it exists only so that callers can use the same calling
 * form as Tree SRCU, which needs a per-CPU data argument.
 */
#define __SRCU_STRUCT_INIT(name, __ignored)				\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}
40
/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)

/* Wait for a full SRCU grace period on @ssp (implemented in srcutiny.c). */
void synchronize_srcu(struct srcu_struct *ssp);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance. Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	/* Snapshot the current reader slot; the GP code may flip srcu_idx. */
	idx = READ_ONCE(ssp->srcu_idx);
	/* Record one more reader in that slot; WRITE_ONCE pairs with the
	 * grace-period code's READ_ONCE of this counter. */
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
	return idx;
}
66
/*
 * Tiny SRCU has no separate expedited machinery; the expedited API is
 * simply a plain synchronize_srcu().
 */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
71
/*
 * Wait for in-flight SRCU callbacks: in Tiny SRCU this is implemented as
 * a full synchronize_srcu() (presumably a grace period also drains the
 * callback list here — see srcu_drive_gp() in srcutiny.c).
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
76
/* Defined here to avoid size increase for non-torture kernels. */
/*
 * Print both reader slots' nesting counts for rcutorture diagnostics;
 * @tt/@tf are caller-supplied tag strings prepended to the line.
 */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;	/* Mask to a valid slot, 0 or 1. */
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
		 tt, tf, idx,
		 READ_ONCE(ssp->srcu_lock_nesting[!idx]),
		 READ_ONCE(ssp->srcu_lock_nesting[idx]));
}

#endif /* _LINUX_SRCU_TINY_H */