/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}
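
/*
 * Illustrative sketch (not part of the upstream header): on NO_HZ_FULL
 * kernels the tick may be stopped, so low-level entry code that interrupts
 * an RCU-watching context calls rcu_irq_enter_check_tick() to give RCU a
 * chance to turn the tick back on if it needs it. The entry sequence below
 * is hypothetical:
 *
 *	// arch irq entry, RCU already watching
 *	rcu_irq_enter_check_tick();
 *	handle_arch_irq(regs);
 */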

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)

/*
 * Like __irq_enter() but without time accounting, for fast
 * interrupts, e.g. the reschedule IPI, where the time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
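
/*
 * Illustrative contrast between the two helpers above (a sketch, not
 * upstream code): __irq_enter() bills the interrupted task for irq time,
 * while __irq_enter_raw() skips that step for interrupts so short that
 * the accounting would dominate. The handler name is hypothetical:
 *
 *	__irq_enter_raw();		// no account_irq_enter_time()
 *	scheduler_ipi_handler();	// hypothetical cheap handler
 *	__irq_exit_raw();		// matching raw exit, defined below
 */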

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		lockdep_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Like __irq_exit() but without time accounting.
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
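
/*
 * Illustrative pairing of the entry/exit helpers above (a sketch, not
 * upstream code); the dispatch step shown is hypothetical:
 *
 *	irq_enter();			// hardirq context + time accounting
 *	generic_handle_irq(irq);	// run the handler
 *	irq_exit();			// leave hardirq, maybe run softirqs
 *
 * The _rcu variants are for callers where RCU is already watching, e.g.
 * entry code that has done the RCU bookkeeping itself.
 */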

#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
	} while (0)

#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		rcu_nmi_enter();				\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)

#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
		printk_nmi_exit();				\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		rcu_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)
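
/*
 * Illustrative NMI handler shape (a sketch, not upstream code). Per the
 * "NMI vs Tracing" note above, the C entry point must be notrace and must
 * call nmi_enter() before doing anything a tracer could see; the function
 * and its body are hypothetical:
 *
 *	notrace void do_arch_nmi(struct pt_regs *regs)
 *	{
 *		nmi_enter();		// in_nmi() becomes true here
 *		handle_nmi(regs);	// tracer-visible work is now safe
 *		nmi_exit();
 *	}
 */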

#endif /* LINUX_HARDIRQ_H */