/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
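
/*
 * Illustrative sketch, not part of this header: a driver typically stops
 * the interrupt source and then waits for in-flight handlers to finish
 * before freeing the data those handlers touch.  The foo_* names below
 * are hypothetical.
 *
 *	static void foo_teardown(struct foo_dev *foo)
 *	{
 *		disable_irq_nosync(foo->irq);	// no new handler invocations
 *		synchronize_irq(foo->irq);	// wait for running handlers
 *		kfree(foo->ring);		// now safe to free handler data
 *	}
 *
 * synchronize_hardirq() only waits for the hard-IRQ part of the handler;
 * its bool return is meant to signal whether a threaded handler is still
 * active.
 */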

#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

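/*
 * Rough description, assuming the usual NO_HZ_FULL setup: this is called
 * from the interrupt-entry path so that, when context tracking is enabled,
 * RCU can re-enable the scheduling-clock tick on a nohz_full CPU it needs
 * a quiescent state from.  Otherwise it is a no-op.
 */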
static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
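
/*
 * After __irq_enter() the HARDIRQ_OFFSET added above makes in_irq() (and
 * friends that test the hardirq bits of preempt_count) report true until
 * the matching __irq_exit() subtracts it again; the enter/exit calls are
 * always balanced, which is what the comment above relies on.
 */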

/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);
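
/*
 * Illustrative sketch of how arch interrupt entry code is expected to use
 * these (handle_one_irq() is a hypothetical stand-in for whatever the
 * architecture dispatches to):
 *
 *	irq_enter();
 *	handle_one_irq(regs);
 *	irq_exit();
 */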

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		lockdep_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
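
/*
 * Pairing note: irq_enter()/irq_exit() include the RCU bookkeeping, while
 * the _rcu variants are meant for entry code that has already made RCU
 * watch this CPU and only needs the preempt_count, accounting and softirq
 * side of the work.
 */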

#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
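/*
 * Worked out from the preempt_count layout in <linux/preempt.h> (assuming
 * NMI_BITS == 4): the NMI field holds the values 0..15, in_nmi() masks
 * exactly that field, and in_nmi() == NMI_MASK means all 15 nesting levels
 * are already in use, so the BUG_ON() below trips on an attempt to enter a
 * 16th level, which would overflow the field.
 */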
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
	} while (0)

#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		rcu_nmi_enter();				\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)
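
/*
 * Illustrative sketch of the rule from the "NMI vs Tracing" comment above
 * (the handler name is hypothetical):
 *
 *	notrace void arch_handle_nmi(struct pt_regs *regs)
 *	{
 *		nmi_enter();		// first: make in_nmi() true
 *		... handle the NMI ...
 *		nmi_exit();
 *	}
 */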

#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
		printk_nmi_exit();				\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		rcu_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */