/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
bool is_hardlockup(void);

extern int watchdog_user_enabled;
extern int nmi_watchdog_user_enabled;
extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

#else /* !CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;

extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* !CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }

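/*
 * NULL here lets these names be passed straight to the CPU hotplug core,
 * which skips NULL startup/teardown callbacks.
 */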
#define lockup_detector_online_cpu	NULL
#define lockup_detector_offline_cpu	NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its own dedicated
 * bit - bit 0 for the hard lockup detector and bit 1 for the soft lockup
 * detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
 * 'soft_watchdog_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)
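
/*
 * Example (illustrative sketch, not a function in this header): code that
 * needs to know whether a given detector should be running tests these
 * bits against 'watchdog_enabled':
 *
 *	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED)
 *		start_soft_watchdog();	(hypothetical enable path)
 */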

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

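/*
 * The nmi_watchdog sysctl is made writable only when a hard lockup
 * detector implementation exists to act on writes; otherwise it is
 * exposed read-only.
 */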
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
# define NMI_WATCHDOG_SYSCTL_PERM	0644
#else
# define NMI_WATCHDOG_SYSCTL_PERM	0444
#endif

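/*
 * When the perf-based hard lockup detector is not built in, the stubs
 * below keep callers compiling: init reports -ENODEV unless the
 * architecture supplies its own NMI watchdog (CONFIG_HAVE_NMI_WATCHDOG),
 * in which case <asm/nmi.h> provides arch_touch_nmi_watchdog().
 */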
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_disable(void);
extern void hardlockup_detector_perf_enable(void);
extern void hardlockup_detector_perf_cleanup(void);
extern int hardlockup_detector_perf_init(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
# else
static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
#endif

void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);

void lockup_detector_reconfigure(void);

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
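
/*
 * Usage sketch (poll_device_ready() is a hypothetical helper): a long
 * busy-wait with interrupts off pets both watchdogs on each iteration:
 *
 *	local_irq_disable();
 *	while (!poll_device_ready()) {
 *		touch_nmi_watchdog();
 *		cpu_relax();
 *	}
 *	local_irq_enable();
 */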

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism
 * (a usage sketch follows the #endif of this conditional block):
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
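
/*
 * Sketch (illustrative; the IPI sender name is made up) of how an
 * architecture can implement arch_trigger_cpumask_backtrace() on top of
 * the generic helper:
 *
 *	static void raise_backtrace_ipi(cpumask_t *mask)
 *	{
 *		... send an NMI or IPI to every CPU in @mask (arch-specific) ...
 *	}
 *
 *	void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 *					    bool exclude_self)
 *	{
 *		nmi_trigger_cpumask_backtrace(mask, exclude_self,
 *					      raise_backtrace_ipi);
 *	}
 */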

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
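
/*
 * Usage sketch (hypothetical caller): honour the return value so there
 * is a fallback when the architecture cannot trigger remote backtraces:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 */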

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

struct ctl_table;
int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_nmi_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_soft_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_watchdog_thresh(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
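
/*
 * Sketch (illustrative, modelled on kernel/watchdog.c) of how one of
 * these handlers is wired into a sysctl table entry:
 *
 *	{
 *		.procname	= "watchdog",
 *		.data		= &watchdog_user_enabled,
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= proc_watchdog,
 *	},
 */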

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif