/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>

/*
 * The primary CPU waits a while for all secondary CPUs to enter. This is to
 * avoid sending an IPI if the secondary CPUs are entering
 * crash_kexec_secondary on their own (eg via a system reset).
 *
 * The secondary timeout has to be longer than the primary. Both timeouts are
 * in milliseconds.
 */
#define PRIMARY_TIMEOUT		500
#define SECONDARY_TIMEOUT	1000

#define IPI_TIMEOUT		10000
#define REAL_MODE_TIMEOUT	10000

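/*
 * Set by the crashing CPU once it has quiesced the system; CPUs spinning
 * in crash_ipi_callback() wait for this before heading into the kexec
 * wait loop.
 */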
static int time_to_dump;

/*
 * crash_wake_offline should be set to 1 by platforms that intend to wake
 * up offline cpus prior to jumping to a kdump kernel. Currently powernv
 * sets it to 1, since we want to avoid unexpected behaviour when an
 * offline CPU wakes up due to something like an HMI (malfunction error),
 * which propagates to all threads.
 */
int crash_wake_offline;

#define CRASH_HANDLER_MAX 3
/* List of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);
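
/*
 * setjmp context and owning CPU, used both to survive faults in the
 * registered shutdown handlers and to return to the primary CPU after
 * a manually triggered system reset.
 */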
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;

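/*
 * Installed as the debugger fault handler around dangerous regions: if
 * the CPU that owns crash_shutdown_buf faults, longjmp back to the
 * matching setjmp instead of dying in the fault path.
 */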
static int handle_fault(struct pt_regs *regs)
{
	if (crash_shutdown_cpu == smp_processor_id())
		longjmp(crash_shutdown_buf, 1);
	return 0;
}

#ifdef CONFIG_SMP

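/* Count of CPUs that have entered the crash path; the primary waits on it. */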
static atomic_t cpus_in_crash;
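
/*
 * Entered by each secondary CPU, either from the crash IPI or from a
 * system reset. Register state is saved only once per CPU, even if the
 * CPU comes through here twice.
 */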
void crash_ipi_callback(struct pt_regs *regs)
{
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Wait here until the primary CPU signals that it is time to dump;
	 * this makes sure all CPUs have stopped before the kdump boot starts.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}

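/*
 * Runs on the crashing CPU. Sends the crash IPI, then waits up to
 * IPI_TIMEOUT ms for the others to arrive. If some CPUs do not respond,
 * fall back to asking the operator for a system reset, which pushes
 * every CPU through the 0x100 vector and back into the crash path.
 */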
static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;
	unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
	int tries = 0;
	int (*old_handler)(struct pt_regs *regs);

	printk(KERN_EMERG "Sending IPI to other CPUs\n");

	if (crash_wake_offline)
		ncpus = num_present_cpus() - 1;

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

again:
	/*
	 * FIXME: Until we have a way to stop other CPUs reliably, the
	 * crash CPU sends an IPI and waits for the other CPUs to respond.
	 */
	msecs = IPI_TIMEOUT;
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

	if (atomic_read(&cpus_in_crash) >= ncpus) {
		printk(KERN_EMERG "IPI complete\n");
		return;
	}

	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
	       ncpus - atomic_read(&cpus_in_crash));

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate the system reset. We also give up on the
	 * second time through if the system reset fails to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take a 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs re-execute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
		atomic_set(&cpus_in_crash, 0);
		smp_mb();

		while (atomic_read(&cpus_in_crash) < ncpus)
			cpu_relax();
	}

	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
}

/*
 * This function will be called by secondary CPUs.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	unsigned long flags;
	int msecs = SECONDARY_TIMEOUT;

	local_irq_save(flags);

	/* Wait for the primary crash CPU to signal its progress */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/* No response, kdump image may not have been loaded */
			local_irq_restore(flags);
			return;
		}

		mdelay(1);
	}

	crash_ipi_callback(regs);
}

#else	/* ! CONFIG_SMP */

static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * Move the secondaries to a holding spot so that we can safely
	 * copy the new kernel over the 0-0x100 exception vector region.
	 *
	 * Should this be done by the kexec code in setup.c?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}

void crash_kexec_secondary(struct pt_regs *regs)
{
}
#endif	/* CONFIG_SMP */

/* Wait for all the CPUs to hit real mode, but time out if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	msecs = REAL_MODE_TIMEOUT;
	for (i = 0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC64 */

/*
 * Register a function to be called on shutdown. Only use this if you
 * can't reset your device in the second kernel.
 */
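/*
 * A minimal, hypothetical example of using this from a driver (the
 * handler name and error message are illustrative only):
 *
 *	static void mydev_crash_shutdown(void)
 *	{
 *		... quiesce the device so the kdump kernel sees sane state ...
 *	}
 *
 *	if (crash_shutdown_register(mydev_crash_shutdown))
 *		pr_err("mydev: crash handler table full\n");
 */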
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handle at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);

int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0; i < CRASH_HANDLER_MAX; i++)
		if (crash_shutdown_handles[i] == handler)
			break;

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		rc = 1;
	} else {
		/* Shift handles down */
		for (; i < (CRASH_HANDLER_MAX - 1); i++)
			crash_shutdown_handles[i] = crash_shutdown_handles[i + 1];
		/*
		 * Reset the last entry to NULL now that it has been shifted
		 * down; this allows new handles to be added here.
		 */
		crash_shutdown_handles[i] = NULL;
		rc = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);

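/*
 * Crash entry point for the crashing ("primary") CPU: stop the other
 * CPUs, save register state, run the registered shutdown handlers and
 * leave the machine ready for the kdump kernel.
 */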
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of the crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == 0x100)
		mdelay(PRIMARY_TIMEOUT);

	crash_kexec_prepare_cpus(crashing_cpu);

	crash_save_cpu(regs, crashing_cpu);

	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

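	/* Quiesce the interrupt controllers so the kdump kernel starts clean. */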
	machine_kexec_mask_interrupts();

	/*
	 * Call the registered shutdown routines safely. Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs to ensure instructions in the
			 * dangerous region don't leak away from this
			 * protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}