/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.  The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch(false);
}

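/*
 * Illustrative usage sketch, not part of this header: a hypothetical
 * hypervisor vCPU-switch path could report the context switch as shown
 * below, with interrupts already disabled as the comment above requires.
 * The cpu argument is accepted for API symmetry but unused here.
 *
 *	rcu_virt_note_context_switch(smp_processor_id());
 *	... hand the physical CPU to the next vCPU ...
 */
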
void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);

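/*
 * Illustrative usage sketch, not part of this header: the two
 * declarations above form a polling pair.  A caller can snapshot the
 * grace-period state, do other work, and later block only if a full
 * grace period has not already elapsed since the snapshot.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *	... other work ...
 *	cond_synchronize_rcu(cookie);  (returns immediately if a grace
 *	                                period has elapsed, otherwise
 *	                                waits like synchronize_rcu())
 */
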
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_exit_preempt(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);

#endif /* __LINUX_RCUTREE_H */