/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes. The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch(false);
}
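
/*
 * Usage sketch (hypothetical, not part of this header): a virtualization
 * driver noting a guest context switch on behalf of RCU.  It only
 * illustrates the rule stated above that the caller must have interrupts
 * disabled; the example_* function name is illustrative, not an existing
 * kernel symbol.
 *
 *	static void example_note_guest_switch(int cpu)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		rcu_virt_note_context_switch(cpu);
 *		local_irq_restore(flags);
 *	}
 */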

void synchronize_rcu_expedited(void);
void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);

void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);

void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);

#endif /* __LINUX_RCUTREE_H */