// SPDX-License-Identifier: GPL-2.0
/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/paca.h>

/*
 * The powerpc watchdog ensures that each CPU is able to service timers.
 * The watchdog sets up a simple timer on each CPU to run once per timer
 * period, and updates a per-cpu timestamp and a "pending" cpumask. This is
 * the heartbeat.
 *
 * Then there are two systems to check that the heartbeat is still running.
 * The local soft-NMI, and the SMP checker.
 *
 * The soft-NMI checker can detect lockups on the local CPU. When interrupts
 * are disabled with local_irq_disable(), platforms that use soft-masking
 * can leave hardware interrupts enabled and handle them with a masked
 * interrupt handler. The masked handler can send the timer interrupt to the
 * watchdog's soft_nmi_interrupt(), which appears to Linux as an NMI
 * interrupt, and can be used to detect CPUs stuck with IRQs disabled.
 *
 * The soft-NMI checker will compare the heartbeat timestamp for this CPU
 * with the current time, and take action if the difference exceeds the
 * watchdog threshold.
 *
 * The limitation of the soft-NMI watchdog is that it does not work when
 * interrupts are hard disabled or otherwise not being serviced. This is
 * solved by also having an SMP watchdog where all CPUs check each
 * other's heartbeats.
 *
 * The SMP checker can detect lockups on other CPUs. A global "pending"
 * cpumask is kept, containing all CPUs which enable the watchdog. Each
 * CPU clears its pending bit in its heartbeat timer. When the bitmask
 * becomes empty, the last CPU to clear its pending bit updates a global
 * timestamp and refills the pending bitmask.
 *
 * In the heartbeat timer, if any CPU notices that the global timestamp has
 * not been updated for a period exceeding the watchdog threshold, then it
 * means the CPU(s) with their bit still set in the pending mask have had
 * their heartbeats stop, and action is taken.
 *
 * Some platforms implement true NMI IPIs, which can be used by the SMP
 * watchdog to detect an unresponsive CPU and pull it out of its stuck
 * state with the NMI IPI, to get crash/debug data from it. This way the
 * SMP watchdog can detect lockups even when hardware interrupts are
 * disabled.
 */
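
/*
 * Illustrative timeline of the SMP checker (a sketch only, assuming the
 * default watchdog_thresh of 10s, which gives a ~4s heartbeat period and
 * a ~15s SMP timeout; see watchdog_calc_timeouts() below):
 *
 *	t=0s	every enabled CPU clears its bit in wd_smp_cpus_pending;
 *		the last one refills the mask and sets wd_smp_last_reset_tb.
 *	t=4s	heartbeat timers fire again; CPU 3 is stuck with hardware
 *		interrupts disabled, so its bit stays set and the mask
 *		never empties.
 *	t>15s	another CPU's heartbeat sees tb - wd_smp_last_reset_tb
 *		exceed wd_smp_panic_timeout_tb and calls
 *		watchdog_smp_panic(), which NMI-IPIs the stuck CPU and
 *		moves it to wd_smp_cpus_stuck.
 */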

static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */

static DEFINE_PER_CPU(struct timer_list, wd_timer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/* SMP checker bits */
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;

static inline void wd_smp_lock(unsigned long *flags)
{
	/*
	 * Avoid locking layers if possible.
	 * This may be called from low-level interrupt handlers at some
	 * point in future.
	 */
	raw_local_irq_save(*flags);
	hard_irq_disable(); /* Make it soft-NMI safe */
	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
		raw_local_irq_restore(*flags);
		spin_until_cond(!test_bit(0, &__wd_smp_lock));
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
	raw_local_irq_restore(*flags);
}
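
/*
 * Usage sketch for the lock pair above (illustrative): all writers of the
 * SMP checker state take wd_smp_lock() first, e.g.
 *
 *	unsigned long flags;
 *
 *	wd_smp_lock(&flags);
 *	cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
 *	wd_smp_unlock(&flags);
 *
 * The lock is a bare test-and-set bit rather than a spinlock so it can be
 * taken from NMI-like context with hardware interrupts disabled, avoiding
 * the generic locking layers (see the comment in wd_smp_lock()).
 */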

static void wd_lockup_ipi(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	u64 tb = get_tb();

	pr_emerg("CPU %d Hard LOCKUP\n", cpu);
	pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, per_cpu(wd_timer_tb, cpu),
		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();

	/* Do not panic from here because that can recurse into NMI IPI layer */
}

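/*
 * Mark the given CPUs stuck and remove them from the pending mask; if that
 * empties the mask, start a new SMP heartbeat interval without them. Both
 * call sites hold wd_smp_lock around this, so the mask updates are atomic
 * with respect to the other checkers.
 */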
static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
			       &wd_cpus_enabled,
			       &wd_smp_cpus_stuck);
	}
}
static void set_cpu_stuck(int cpu, u64 tb)
{
	set_cpumask_stuck(cpumask_of(cpu), tb);
}

static void watchdog_smp_panic(int cpu, u64 tb)
{
	unsigned long flags;
	int c;

	wd_smp_lock(&flags);
	/* Double check some things under lock */
	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
		goto out;
	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
		goto out;
	if (cpumask_weight(&wd_smp_cpus_pending) == 0)
		goto out;

	pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
		 cpu, cpumask_pr_args(&wd_smp_cpus_pending));
	pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
		 cpu, tb, wd_smp_last_reset_tb,
		 tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);

	if (!sysctl_hardlockup_all_cpu_backtrace) {
		/*
		 * Try to trigger the stuck CPUs, unless we are going to
		 * get a backtrace on all of them anyway.
		 */
		for_each_cpu(c, &wd_smp_cpus_pending) {
			if (c == cpu)
				continue;
			smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
		}
	}

	/* Take the stuck CPUs out of the watch group */
	set_cpumask_stuck(&wd_smp_cpus_pending, tb);

	wd_smp_unlock(&flags);

	printk_safe_flush();
	/*
	 * printk_safe_flush() seems to require another print
	 * before anything actually goes out to console.
	 */
	if (sysctl_hardlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");

	return;

out:
	wd_smp_unlock(&flags);
}

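/*
 * Clear this CPU's bit from the pending mask. The common case (the bit is
 * set and the mask does not become empty) runs without taking the lock;
 * the "mask became empty" case is re-checked under wd_smp_lock in the
 * classic double-checked pattern, since another CPU may have refilled the
 * mask in the meantime.
 */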
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
			struct pt_regs *regs = get_irq_regs();
			unsigned long flags;

			wd_smp_lock(&flags);

			pr_emerg("CPU %d became unstuck TB:%lld\n",
				 cpu, tb);
			print_irqtrace_events(current);
			if (regs)
				show_regs(regs);
			else
				dump_stack();

			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
			wd_smp_unlock(&flags);
		}
		return;
	}
	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		unsigned long flags;

		wd_smp_lock(&flags);
		if (cpumask_empty(&wd_smp_cpus_pending)) {
			wd_smp_last_reset_tb = tb;
			cpumask_andnot(&wd_smp_cpus_pending,
				       &wd_cpus_enabled,
				       &wd_smp_cpus_stuck);
		}
		wd_smp_unlock(&flags);
	}
}

static void watchdog_timer_interrupt(int cpu)
{
	u64 tb = get_tb();

	per_cpu(wd_timer_tb, cpu) = tb;

	wd_smp_clear_cpu_pending(cpu, tb);

	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		watchdog_smp_panic(cpu, tb);
}

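/*
 * Entered from the masked-interrupt handler when a timer interrupt fires
 * while interrupts are soft-disabled; to Linux this looks like an NMI
 * (hence the nmi_enter()/nmi_exit() bracketing). If this CPU's own
 * heartbeat timestamp is older than the panic timeout, the CPU has
 * self-detected a hard lockup.
 */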
void soft_nmi_interrupt(struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = raw_smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return;

	nmi_enter();

	__this_cpu_inc(irq_stat.soft_nmi_irqs);

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
		wd_smp_lock(&flags);
		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
			wd_smp_unlock(&flags);
			goto out;
		}
		set_cpu_stuck(cpu, tb);

		pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
			 cpu, (void *)regs->nip);
		pr_emerg("CPU %d TB:%lld, last heartbeat TB:%lld (%lldms ago)\n",
			 cpu, tb, per_cpu(wd_timer_tb, cpu),
			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
		print_modules();
		print_irqtrace_events(current);
		show_regs(regs);

		wd_smp_unlock(&flags);

		if (sysctl_hardlockup_all_cpu_backtrace)
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");
	}
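	/*
	 * Rearm the decrementer so the next soft-NMI arrives roughly one
	 * panic timeout from now. The 0x7fffffff guard is there because
	 * the decrementer is a 32-bit signed register on most processors,
	 * so a larger timeout cannot be programmed into it.
	 */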
	if (wd_panic_timeout_tb < 0x7fffffff)
		mtspr(SPRN_DEC, wd_panic_timeout_tb);

out:
	nmi_exit();
}

static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
{
	t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
	if (wd_timer_period_ms > 1000)
		t->expires = __round_jiffies_up(t->expires, cpu);
	add_timer_on(t, cpu);
}

static void wd_timer_fn(struct timer_list *t)
{
	int cpu = smp_processor_id();

	watchdog_timer_interrupt(cpu);

	wd_timer_reset(cpu, t);
}

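/*
 * Called by code that spins with interrupts off for a long time, to keep
 * the watchdog quiet. The heartbeat is refreshed only once a full timer
 * period's worth of timebase ticks has elapsed. As an illustration
 * (assuming a 512MHz timebase, so tb_ticks_per_usec = 512, and the ~4s
 * default period): ticks = 512 * 4000 * 1000 ~= 2.05e9, i.e. 4 seconds.
 */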
void arch_touch_nmi_watchdog(void)
{
	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();
	u64 tb = get_tb();

	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
		per_cpu(wd_timer_tb, cpu) = tb;
		wd_smp_clear_cpu_pending(cpu, tb);
	}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

static void start_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	per_cpu(wd_timer_tb, cpu) = get_tb();

	timer_setup(t, wd_timer_fn, TIMER_PINNED);
	wd_timer_reset(cpu, t);
}

static void stop_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	del_timer_sync(t);
}

static int start_wd_on_cpu(unsigned int cpu)
{
	unsigned long flags;

	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return 0;
	}

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return 0;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return 0;

	wd_smp_lock(&flags);
	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
	wd_smp_unlock(&flags);

	start_watchdog_timer_on(cpu);

	return 0;
}

static int stop_wd_on_cpu(unsigned int cpu)
{
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return 0; /* Can happen in CPU unplug case */

	stop_watchdog_timer_on(cpu);

	wd_smp_lock(&flags);
	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
	wd_smp_unlock(&flags);

	wd_smp_clear_cpu_pending(cpu, get_tb());

	return 0;
}

static void watchdog_calc_timeouts(void)
{
	wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

	/* Have the SMP detector trigger a bit later */
	wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

	/* 2/5 is the factor that the perf-based detector uses */
	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
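
/*
 * Worked example for the above (a sketch, assuming the default
 * watchdog_thresh of 10s and a 512MHz timebase):
 *
 *	wd_panic_timeout_tb     = 10 * 512000000    = 5.12e9 ticks (10s)
 *	wd_smp_panic_timeout_tb = 5.12e9 * 3 / 2    = 7.68e9 ticks (15s)
 *	wd_timer_period_ms      = 10 * 1000 * 2 / 5 = 4000ms
 */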

void watchdog_nmi_stop(void)
{
	int cpu;

	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_wd_on_cpu(cpu);
}

void watchdog_nmi_start(void)
{
	int cpu;

	watchdog_calc_timeouts();
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_wd_on_cpu(cpu);
}

/*
 * Invoked from core watchdog init.
 */
int __init watchdog_nmi_probe(void)
{
	int err;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"powerpc/watchdog:online",
					start_wd_on_cpu, stop_wd_on_cpu);
	if (err < 0) {
		pr_warn("could not be initialized\n");
		return err;
	}
	return 0;
}