/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

15#if defined(CONFIG_TINY_RCU)
16
17static inline void rcu_nmi_enter(void)
18{
19}
20
21static inline void rcu_nmi_exit(void)
22{
23}
24
25#else
26extern void rcu_nmi_enter(void);
27extern void rcu_nmi_exit(void);
28#endif
29
30/*
31 * It is safe to do non-atomic ops on ->hardirq_context,
32 * because NMI handlers may not preempt and the ops are
33 * always balanced, so the interrupted value of ->hardirq_context
34 * will always be restored.
35 */
36#define __irq_enter() \
37 do { \
38 account_irq_enter_time(current); \
39 preempt_count_add(HARDIRQ_OFFSET); \
40 trace_hardirq_enter(); \
41 } while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

48/*
49 * Exit irq context without processing softirqs:
50 */
51#define __irq_exit() \
52 do { \
53 trace_hardirq_exit(); \
54 account_irq_exit_time(current); \
55 preempt_count_sub(HARDIRQ_OFFSET); \
56 } while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

63#define nmi_enter() \
64 do { \
65 printk_nmi_enter(); \
66 lockdep_off(); \
67 ftrace_nmi_enter(); \
68 BUG_ON(in_nmi()); \
69 preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
70 rcu_nmi_enter(); \
71 trace_hardirq_enter(); \
72 } while (0)
73
74#define nmi_exit() \
75 do { \
76 trace_hardirq_exit(); \
77 rcu_nmi_exit(); \
78 BUG_ON(!in_nmi()); \
79 preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
80 ftrace_nmi_exit(); \
81 lockdep_on(); \
82 printk_nmi_exit(); \
83 } while (0)

#endif /* LINUX_HARDIRQ_H */