/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}
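
/*
 * One possible pairing of the two functions above (an illustrative
 * sketch only; do_update() is a hypothetical updater):
 *
 *	unsigned long oldstate = get_state_synchronize_rcu();
 *	do_update();
 *	cond_synchronize_rcu(oldstate);
 *
 * On Tiny RCU the cookie is always zero and a reader cannot span a
 * context switch on the one and only CPU, so cond_synchronize_rcu()
 * never needs to actually block.
 */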

extern void rcu_barrier(void);

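/* Expedited grace periods gain nothing on one CPU; use the normal path. */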
static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

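/*
 * Tiny RCU has no separate batching for kfree-after-grace-period
 * requests; queueing through call_rcu() is sufficient.
 */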
static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}

void rcu_qs(void);

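/* Report a quiescent state from softirq: with one CPU, just rcu_qs(). */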
static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

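/*
 * A context switch is a quiescent state for both vanilla RCU and
 * RCU-tasks on a !PREEMPT uniprocessor, so report both here; no
 * per-CPU bookkeeping is needed.
 */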
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current); \
	} while (0)

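/*
 * Tell the nohz-idle code that RCU never needs the scheduling-clock tick
 * on this CPU: report no pending work and no future deadline.
 */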
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
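/* Use the default 21-second RCU CPU stall timeout. */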
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
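/*
 * Tiny RCU is built only for !PREEMPT kernels, so a quiescent state
 * can never be deferred from within a read-side critical section.
 */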
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
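/* Tiny SRCU tracks rcu_scheduler_active, so it needs a real boot-time hook. */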
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_RCUTINY_H */