/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x3fff : Real mode trampolines
 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x4000)
OPEN_FIXED_SECTION(virt_vectors,	0x4000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


#ifdef CONFIG_PPC_P7_NAP
/*
 * If running native on arch 2.06 or later, check if we are waking up
 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
 * bits 46:47. A non-0 value indicates that we are coming from a power
 * saving state. The idle wakeup handler initially runs in real mode,
 * but we branch to the 0xc000... address so we can turn on relocation
 * with mtmsr.
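 *
 * (The rlwinm. in IDLETEST rotates SRR1 left by 16 and masks bits 30:31
 * of the low word, i.e. it extracts SRR1[46:47] into the low two bits
 * of r10; the record form sets cr0 so beq- can test for "not a
 * power-saving wakeup".)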
 */
#define IDLETEST(n)							\
	BEGIN_FTR_SECTION ;						\
	mfspr	r10,SPRN_SRR1 ;						\
	rlwinm.	r10,r10,47-31,30,31 ;					\
	beq-	1f ;							\
	cmpwi	cr3,r10,2 ;						\
	BRANCH_TO_C000(r10, system_reset_idle_common) ;			\
1:									\
	KVMTEST_PR(n) ;							\
	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
#define IDLETEST NOTEST
#endif

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
	SET_SCRATCH0(r13)
	/*
	 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
	 * in use, so a nested NMI exception would corrupt them.
	 */
	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
			      IDLETEST, 0x100)

EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
TRAMP_KVM(PACA_EXNMI, 0x100)

#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
	mfspr	r12,SPRN_SRR1
	b	pnv_powersave_wakeup
#endif

/*
 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
 * the right thing. We do not want to reconcile because that goes
 * through irq tracing which we don't want in NMI.
 *
 * Save PACAIRQHAPPENED because some code will do a hard disable
 * (e.g., xmon). So we want to restore this back to where it was
 * when we return. DAR is unused in the stack, so save it there.
 */
#define ADD_RECONCILE_NMI						\
	li	r10,IRQS_ALL_DISABLED;					\
	stb	r10,PACAIRQSOFTMASK(r13);				\
	lbz	r10,PACAIRQHAPPENED(r13);				\
	std	r10,_DAR(r1)

EXC_COMMON_BEGIN(system_reset_common)
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
			system_reset, system_reset_exception,
			ADD_NVGPRS;ADD_RECONCILE_NMI)

	/* This (and MCE) can be simplified with mtmsrd L=1 */
	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r0,MSR_RI
	mfmsr	r9
	andc	r9,r9,r0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	/*
	 * Restore soft mask settings.
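	 * (PACAIRQHAPPENED was stashed in _DAR(r1) by ADD_RECONCILE_NMI
	 * above; SOFTE(r1) carries the pre-interrupt soft-mask state.)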
	 */
	ld	r10,_DAR(r1)
	stb	r10,PACAIRQHAPPENED(r13)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	/*
	 * Keep the code below in sync with MACHINE_CHECK_HANDLER_WINDUP.
	 * Should share common bits...
	 */

	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_SRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_SRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
	RFI_TO_USER_OR_KERNEL

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	SET_SCRATCH0(r13)		/* save r13 */
	/* See comment at system_reset exception */
	EXCEPTION_PROLOG_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
			      NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */


EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
	b	machine_check_powernv_early
FTR_SECTION_ELSE
	b	machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
TRAMP_REAL_BEGIN(machine_check_powernv_early)
BEGIN_FTR_SECTION
	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
	/*
	 * Register contents:
	 * R13		= PACA
	 * R9		= CR
	 * Original R9 to R13 is saved on PACA_EXMC
	 *
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	mr	r11,r1			/* Save r1 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	bne	0f			/* Yes, we are. */
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)
	/* Limit nested MCE to level 4 to avoid stack overflow */
	cmpwi	r10,MAX_MCE_DEPTH
	bgt	2f			/* Check if we hit limit of 4 */
	std	r11,GPR1(r1)		/* Save r1 on the stack. */
	std	r11,0(r1)		/* make stack chain pointer */
	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
	std	r11,_NIP(r1)
	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
	std	r11,_MSR(r1)
	mfspr	r11,SPRN_DAR		/* Save DAR */
	std	r11,_DAR(r1)
	mfspr	r11,SPRN_DSISR		/* Save DSISR */
	std	r11,_DSISR(r1)
	std	r9,_CCR(r1)		/* Save CR in stackframe */
	/* Save r9 through r13 from EXMC save area to stack frame. */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
	mfmsr	r11			/* get MSR value */
	ori	r11,r11,MSR_ME		/* turn on ME bit */
	ori	r11,r11,MSR_RI		/* turn on RI bit */
	LOAD_HANDLER(r12, machine_check_handle_early)
1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
2:
	/* Stack overflow. Stay on emergency stack and panic.
	 * Keep the ME bit off while panicking, so that if we hit
	 * another machine check we checkstop.
	 */
	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
	ld	r11,PACAKMSR(r13)
	LOAD_HANDLER(r12, unrecover_mce)
	li	r10,MSR_ME
	andc	r11,r11,r10		/* Turn off MSR_ME */
	b	1b
	b	.	/* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

TRAMP_REAL_BEGIN(machine_check_pSeries)
	.globl machine_check_fwnmi
machine_check_fwnmi:
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	EXCEPTION_PROLOG_2_NORI(machine_check_common, EXC_STD)

TRAMP_KVM_SKIP(PACA_EXMC, 0x200)

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXMC+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXMC+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3,PACA_EXMC+EX_DAR(r13)
	lwz	r4,PACA_EXMC+EX_DSISR(r13)
	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_except

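/*
 * Wind up a machine check for return: clear MSR_RI, move the saved
 * SRR0/SRR1 back into the SPRs, restore CTR/XER/LR/CR and the GPRs,
 * and decrement paca->in_mce. The in_mce decrement is done late, once
 * r12 is free to use as scratch, but before r1 is switched back off
 * the MC emergency stack.
 */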
#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r0,MSR_RI;				\
	mfmsr	r9;		/* get MSR value */	\
	andc	r9,r9,r0;				\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Move original SRR0 and SRR1 into the respective regs */	\
	ld	r9,_MSR(r1);				\
	mtspr	SPRN_SRR1,r9;				\
	ld	r3,_NIP(r1);				\
	mtspr	SPRN_SRR0,r3;				\
	ld	r9,_CTR(r1);				\
	mtctr	r9;					\
	ld	r9,_XER(r1);				\
	mtxer	r9;					\
	ld	r9,_LINK(r1);				\
	mtlr	r9;					\
	REST_GPR(0, r1);				\
	REST_8GPRS(2, r1);				\
	REST_GPR(10, r1);				\
	ld	r11,_CCR(r1);				\
	mtcr	r11;					\
	/* Decrement paca->in_mce. */			\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	REST_GPR(11, r1);				\
	REST_2GPRS(12, r1);				\
	/* restore original r1. */			\
	ld	r1,GPR1(r1)

#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * We have not used any non-volatile GPRs here, and as a rule
	 * most exception code including machine check does not.
	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
	 * wakeup will restore volatile registers.
	 *
	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	/* Turn off the RI bit because SRR1 is used by idle wakeup code. */
	/* Recoverability could be improved by reducing the use of SRR1. */
	li	r11,0
	mtmsrd	r11,1

	b	pnv_powersave_wakeup_mce
#endif
/*
 * Handle machine check early in real mode. We come here with
 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
 */
EXC_COMMON_BEGIN(machine_check_handle_early)
	std	r0,GPR0(r1)	/* Save r0 */
	EXCEPTION_PROLOG_COMMON_3(0x200)
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	/*
	 * Check if we are coming from hypervisor userspace. If yes then we
	 * continue in host kernel in V mode to deliver the MC event.
	 */
	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
	beq	5f
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	9f			/* continue in V mode if we are. */

5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * We are coming from kernel context. Check if we are coming from
	 * guest. If yes, then we can continue. We will fall through
	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	9f			/* continue if we are. */
#endif
	/*
	 * At this point we are not sure about what context we come from.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	bne	2f
1:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecover_mce)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */

	beq	1b		/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_USER_OR_KERNEL
9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

EXC_COMMON_BEGIN(unrecover_mce)
	/* Invoke machine_check_exception to print MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b


EXC_REAL(data_access, 0x300, 0x80)
EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)

EXC_COMMON_BEGIN(data_access_common)
	/*
	 * Here r13 points to the paca, r9 contains the saved CR,
	 * SRR0 and SRR1 are saved in r11 and r12,
	 * r9 - r13 are saved in paca->exgen.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	mr	r12,r3	/* save r3 */
	mfspr	r3,SPRN_DAR
	mfspr	r11,SPRN_SRR1
	crset	4*cr6+eq
	BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_REAL_END(data_access_slb, 0x380, 0x80)

EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	mr	r12,r3	/* save r3 */
	mfspr	r3,SPRN_DAR
	mfspr	r11,SPRN_SRR1
	crset	4*cr6+eq
	BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)


EXC_REAL(instruction_access, 0x400, 0x80)
EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)

EXC_COMMON_BEGIN(instruction_access_common)
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,DSISR_SRR1_MATCH_64S@h
	li	r5,0x400
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	mr	r12,r3	/* save r3 */
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r11,SPRN_SRR1
	crclr	4*cr6+eq
	BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)

EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	mr	r12,r3	/* save r3 */
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r11,SPRN_SRR1
	crclr	4*cr6+eq
	BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
TRAMP_KVM(PACA_EXSLB, 0x480)


/*
 * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as
 * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled.
 */
EXC_COMMON_BEGIN(slb_miss_common)
	/*
	 * r13 points to the PACA, r9 contains the saved CR,
	 * r12 contains the saved r3,
	 * r11 contains the saved SRR1, SRR0 is still ready for return
	 * r3 has the faulting address
	 * r9 - r13 are saved in paca->exslb.
	 * cr6.eq is set for a D-SLB miss, clear for an I-SLB miss
	 * We assume we aren't going to take any exceptions during this
	 * procedure.
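	 * (A nested SLB miss would reuse paca->exslb and clobber the
	 * state saved there.)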
	 */
	mflr	r10
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	andi.	r9,r11,MSR_PR	// Check for exception from userspace
	cmpdi	cr4,r9,MSR_PR	// And save the result in CR4 for later

	/*
	 * Test MSR_RI before calling slb_allocate_realmode, because the
	 * MSR in r11 gets clobbered. However we still want to allocate
	 * SLB in case MSR_RI=0, to minimise the risk of getting stuck in
	 * recursive SLB faults. So use cr5 for this, which is preserved.
	 */
	andi.	r11,r11,MSR_RI	/* check for unrecoverable exception */
	cmpdi	cr5,r11,MSR_RI
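	/* cr5.eq is now set iff the interrupted context had MSR_RI set */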

	crset	4*cr0+eq
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	bl	slb_allocate
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif

	ld	r10,PACA_EXSLB+EX_LR(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	mtlr	r10

	/*
	 * Large address, check whether we have to allocate new contexts.
	 */
	beq-	8f

	bne-	cr5,2f	/* if unrecoverable exception, oops */

	/* All done -- return from exception. */

	bne	cr4,1f		/* returning to kernel */

	mtcrf	0x80,r9
	mtcrf	0x08,r9		/* MSR[PR] indication is in cr4 */
	mtcrf	0x04,r9		/* MSR[RI] indication is in cr5 */
	mtcrf	0x02,r9		/* I/D indication is in cr6 */
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */

	RESTORE_CTR(r9, PACA_EXSLB)
	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	mr	r3,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	RFI_TO_USER
	b	.	/* prevent speculative execution */
1:
	mtcrf	0x80,r9
	mtcrf	0x08,r9		/* MSR[PR] indication is in cr4 */
	mtcrf	0x04,r9		/* MSR[RI] indication is in cr5 */
	mtcrf	0x02,r9		/* I/D indication is in cr6 */
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */

	RESTORE_CTR(r9, PACA_EXSLB)
	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	mr	r3,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */


2:	std	r3,PACA_EXSLB+EX_DAR(r13)
	mr	r3,r12
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.

8:	std	r3,PACA_EXSLB+EX_DAR(r13)
	mr	r3,r12
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	LOAD_HANDLER(r10, large_addr_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	RFI_TO_KERNEL
	b	.

EXC_COMMON_BEGIN(unrecov_slb)
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

EXC_COMMON_BEGIN(large_addr_slb)
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3, PACA_EXSLB+EX_DAR(r13)
	std	r3, _DAR(r1)
	beq	cr6, 2f
	li	r10, 0x481		/* fix trap number for I-SLB miss */
	std	r10, _TRAP(r1)
2:	bl	save_nvgprs
	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	slb_miss_large_addr
	b	ret_from_except

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	.globl hardware_interrupt_hv;
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		MASKABLE_EXCEPTION_HV(0x500, hardware_interrupt_common, IRQS_DISABLED)
	FTR_SECTION_ELSE
		MASKABLE_EXCEPTION(0x500, hardware_interrupt_common, IRQS_DISABLED)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)

EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		MASKABLE_RELON_EXCEPTION_HV(0x500, hardware_interrupt_common,
					    IRQS_DISABLED)
	FTR_SECTION_ELSE
		__MASKABLE_RELON_EXCEPTION(0x500, hardware_interrupt_common,
					   EXC_STD, SOFTEN_TEST_PR, IRQS_DISABLED)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)

TRAMP_KVM(PACA_EXGEN, 0x500)
TRAMP_KVM_HV(PACA_EXGEN, 0x500)
EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)


EXC_REAL(alignment, 0x600, 0x100)
EXC_VIRT(alignment, 0x4600, 0x100, 0x600)
TRAMP_KVM(PACA_EXGEN, 0x600)
EXC_COMMON_BEGIN(alignment_common)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except


EXC_REAL(program_check, 0x700, 0x100)
EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
	/*
	 * It's possible to receive a TM Bad Thing type program check with
	 * userspace register values (in particular r1), but with SRR1 reporting
	 * that we came from the kernel. Normally that would confuse the bad
	 * stack logic, and we would report a bad kernel stack pointer. Instead
	 * we switch to the emergency stack if we're taking a TM Bad Thing from
	 * the kernel.
	 */
	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
	and	r10,r10,r12		/* Mask SRR1 with that.		*/
	srdi	r10,r10,8		/* Shift it so we can compare	*/
	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
	bne	1f			/* If != go to normal path.	*/

	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
					/* 3 in EXCEPTION_PROLOG_COMMON	*/
	mr	r10,r1			/* Save r1			*/
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
	b	3f			/* Jump into the macro !!	*/
1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except


EXC_REAL(fp_unavailable, 0x800, 0x100)
EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x800)
EXC_COMMON_BEGIN(fp_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif


EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)


EXC_REAL_HV(hdecrementer, 0x980, 0x80)
EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)


EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xa00)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
#else
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
#endif


EXC_REAL(trap_0b, 0xb00, 0x100)
EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)

/*
 * system call / hypercall (0xc00, 0x4c00)
 *
 * The system call exception is invoked with "sc 0" and does not alter HV bit.
 * There is support for kernel code to invoke system calls but there are no
 * in-tree users.
 *
 * The hypercall is invoked with "sc 1" and sets HV=1.
 *
 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 * 0x4c00 virtual mode.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
 *
 * For hypercalls, the register convention is as follows:
 * r0 volatile
 * r1-2 nonvolatile
 * r3 volatile parameter and return value for status
 * r4-r10 volatile input and output value
 * r11 volatile hypercall number and output value
 * r12 volatile input and output value
 * r13-r31 nonvolatile
 * LR nonvolatile
 * CTR volatile
 * XER volatile
 * CR0-1 CR5-7 volatile
 * CR2-4 nonvolatile
 * Other registers nonvolatile
 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving, though xer is not a good idea to use, as hardware may
 * interpret some bits so it may be costly to change them.
 */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
 * There is a little bit of juggling to get syscall and hcall
 * working well. Save r13 in ctr to avoid using SPRG scratch
 * register.
 *
 * Userspace syscalls have already saved the PPR, hcalls must save
 * it before setting HMT_MEDIUM.
 */
#define SYSCALL_KVMTEST						\
	mtctr	r13;						\
	GET_PACA(r13);						\
	std	r10,PACA_EXGEN+EX_R10(r13);			\
	INTERRUPT_TO_KERNEL;					\
	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
	HMT_MEDIUM;						\
	mfctr	r9;

#else
#define SYSCALL_KVMTEST						\
	HMT_MEDIUM;						\
	mr	r9,r13;						\
	GET_PACA(r13);						\
	INTERRUPT_TO_KERNEL;
#endif

#define LOAD_SYSCALL_HANDLER(reg)				\
	__LOAD_HANDLER(reg, system_call_common)

/*
 * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
 * and HMT_MEDIUM.
 */
#define SYSCALL_REAL					\
	mfspr	r11,SPRN_SRR0 ;				\
	mfspr	r12,SPRN_SRR1 ;				\
	LOAD_SYSCALL_HANDLER(r10) ;			\
	mtspr	SPRN_SRR0,r10 ;				\
	ld	r10,PACAKMSR(r13) ;			\
	mtspr	SPRN_SRR1,r10 ;				\
	RFI_TO_KERNEL ;					\
	b	. ;	/* prevent speculative execution */

#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
#define SYSCALL_FASTENDIAN_TEST				\
BEGIN_FTR_SECTION					\
	cmpdi	r0,0x1ebe ;				\
	beq-	1f ;					\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)			\

#define SYSCALL_FASTENDIAN				\
	/* Fast LE/BE switch system call */		\
1:	mfspr	r12,SPRN_SRR1 ;				\
	xori	r12,r12,MSR_LE ;			\
	mtspr	SPRN_SRR1,r12 ;				\
	mr	r13,r9 ;				\
	RFI_TO_USER ;	/* return to userspace */	\
	b	. ;	/* prevent speculative execution */
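
/*
 * Note: the fast endian switch (sc with r0 = 0x1ebe) never reaches
 * system_call_common; it just flips MSR_LE in the saved SRR1, restores
 * r13 from r9, and returns straight to userspace with RFI_TO_USER.
 */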
#else
#define SYSCALL_FASTENDIAN_TEST
#define SYSCALL_FASTENDIAN
#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */

#if defined(CONFIG_RELOCATABLE)
/*
 * We can't branch directly so we do it via the CTR which
 * is volatile across system calls.
 */
#define SYSCALL_VIRT					\
	LOAD_SYSCALL_HANDLER(r10) ;			\
	mtctr	r10 ;					\
	mfspr	r11,SPRN_SRR0 ;				\
	mfspr	r12,SPRN_SRR1 ;				\
	li	r10,MSR_RI ;				\
	mtmsrd	r10,1 ;					\
	bctr ;
#else
/* We can branch directly */
#define SYSCALL_VIRT					\
	mfspr	r11,SPRN_SRR0 ;				\
	mfspr	r12,SPRN_SRR1 ;				\
	li	r10,MSR_RI ;				\
	mtmsrd	r10,1 ;		/* Set RI (EE=0) */	\
	b	system_call_common ;
#endif

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
	SYSCALL_FASTENDIAN_TEST
	SYSCALL_REAL
	SYSCALL_FASTENDIAN
EXC_REAL_END(system_call, 0xc00, 0x100)

EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
	SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
	SYSCALL_FASTENDIAN_TEST
	SYSCALL_VIRT
	SYSCALL_FASTENDIAN
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
 * This is a hcall, so register convention is as above, with these
 * differences:
 * r13 = PACA
 * ctr = orig r13
 * orig r10 saved in PACA
 */
TRAMP_KVM_BEGIN(do_kvm_0xc00)
	/*
	 * Save the PPR (on systems that support it) before changing to
	 * HMT_MEDIUM. That allows the KVM code to save that value into the
	 * guest state (it is the guest's PPR value).
	 */
	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
	mfctr	r10
	SET_SCRATCH0(r10)
	std	r9,PACA_EXGEN+EX_R9(r13)
	mfcr	r9
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
#endif


EXC_REAL(single_step, 0xd00, 0x100)
EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
TRAMP_KVM(PACA_EXGEN, 0xd00)
EXC_COMMON(single_step_common, 0xd00, single_step_exception)

EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
EXC_COMMON_BEGIN(h_data_storage_common)
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except


EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)


EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)

/*
 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
 * first, and then eventually from there to the trampoline to get into virtual
 * mode.
 */
__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	EXCEPTION_PROLOG_COMMON_1()
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
	cmpdi	cr0,r3,0
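	/*
	 * r3 = 0 means the HMI was fully handled in real mode; otherwise
	 * it must also be delivered in virtual mode, so the windup below
	 * branches (bne 1f) towards hmi_exception_after_realmode.
	 */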

	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	REST_2GPRS(12, r1)
	bne	1f
	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)
	HRFI_TO_USER_OR_KERNEL

1:	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tramp_real_hmi_exception

EXC_COMMON_BEGIN(hmi_exception_common)
EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception,
	ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON)

EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif


EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)


EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)


EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
	 * in a transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)


EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif


EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)

EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
#endif

	KVMTEST_HV(0x1500)
	EXCEPTION_PROLOG_2(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)

#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

TRAMP_KVM_HV(PACA_EXGEN, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n)  XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n)  XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n)  XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mfspr	r11,SPRN_HSRR0
	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_UNKNOWN
	b	.
#endif

EXC_COMMON(denorm_common, 0x1500, unknown_exception)


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


EXC_REAL(altivec_assist, 0x1700, 0x100)
EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
TRAMP_KVM(PACA_EXGEN, 0x1700)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif

#ifdef CONFIG_PPC_WATCHDOG

#define MASKED_DEC_HANDLER_LABEL 3f

#define MASKED_DEC_HANDLER(_H)				\
3: /* soft-nmi */					\
	std	r12,PACA_EXGEN+EX_R12(r13);		\
	GET_SCRATCH0(r10);				\
	std	r10,PACA_EXGEN+EX_R13(r13);		\
	EXCEPTION_PROLOG_2(soft_nmi_common, _H)

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
			system_reset, soft_nmi_interrupt,
			ADD_NVGPRS;ADD_RECONCILE)
	b	ret_from_except

#else /* CONFIG_PPC_WATCHDOG */
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
#endif /* CONFIG_PPC_WATCHDOG */

| 1469 | /* |
| 1470 | * An interrupt came in while soft-disabled. We set paca->irq_happened, then: |
| 1471 | * - If it was a decrementer interrupt, we bump the dec to max and return. |
| 1472 | * - If it was a doorbell we return immediately since doorbells are edge |
| 1473 | * triggered and won't automatically refire. |
| 1474 | * - If it was an HMI we return immediately since we handled it in real mode |
| 1475 | * and it won't refire. |
| 1476 | * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. |
| 1477 | * This is called with r10 containing the value to OR into the paca field. |
| 1478 | */ |
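| | /* |
| | * A hedged C rendering of the bookkeeping done by MASKED_INTERRUPT |
| | * below (illustration only; the assembly is the implementation): |
| | * |
| | *	local_paca->irq_happened |= reason;		// reason arrives in r10 |
| | *	if (reason == PACA_IRQ_DEC) { |
| | *		mtspr(SPRN_DEC, 0x7fffffff);		// push the dec far out |
| | *	} else if (reason & PACA_IRQ_MUST_HARD_MASK) { |
| | *		srr1 &= ~MSR_EE;			// return with EE clear |
| | *		local_paca->irq_happened |= PACA_IRQ_HARD_DIS; |
| | *	} |
| | *	// then restore the scratch registers and rfid/hrfid back |
| | */ |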
| 1479 | #define MASKED_INTERRUPT(_H) \ |
| 1480 | masked_##_H##interrupt: \ |
| 1481 | std r11,PACA_EXGEN+EX_R11(r13); \ |
| 1482 | lbz r11,PACAIRQHAPPENED(r13); \ |
| 1483 | or r11,r11,r10; \ |
| 1484 | stb r11,PACAIRQHAPPENED(r13); \ |
| 1485 | cmpwi r10,PACA_IRQ_DEC; \ |
| 1486 | bne 1f; \ |
| 1487 | lis r10,0x7fff; \ |
| 1488 | ori r10,r10,0xffff; \ |
| 1489 | mtspr SPRN_DEC,r10; \ |
| 1490 | b MASKED_DEC_HANDLER_LABEL; \ |
| 1491 | 1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \ |
| 1492 | beq 2f; \ |
| 1493 | mfspr r10,SPRN_##_H##SRR1; \ |
| 1494 | xori r10,r10,MSR_EE; /* clear MSR_EE */ \ |
| 1495 | mtspr SPRN_##_H##SRR1,r10; \ |
| 1496 | ori r11,r11,PACA_IRQ_HARD_DIS; \ |
| 1497 | stb r11,PACAIRQHAPPENED(r13); \ |
| 1498 | 2: /* done */ \ |
| 1499 | mtcrf 0x80,r9; \ |
| 1500 | std r1,PACAR1(r13); \ |
| 1501 | ld r9,PACA_EXGEN+EX_R9(r13); \ |
| 1502 | ld r10,PACA_EXGEN+EX_R10(r13); \ |
| 1503 | ld r11,PACA_EXGEN+EX_R11(r13); \ |
| 1504 | /* returns to kernel where r13 must be set up, so don't restore it */ \ |
| 1505 | ##_H##RFI_TO_KERNEL; \ |
| 1506 | b .; \ |
| 1507 | MASKED_DEC_HANDLER(_H) |
| 1508 | |
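| | /* |
| | * Fallback store-forwarding barrier for CPUs without a dedicated |
| | * instruction: the sync orders prior accesses, the store/reload of r9 |
| | * and r10 pins down outstanding stores, and the ori plus the chain of |
| | * taken branches drains speculation (a best-effort description; this |
| | * stub is patched in by the stf barrier fixup code). |
| | */ |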
| 1509 | TRAMP_REAL_BEGIN(stf_barrier_fallback) |
| 1510 | std r9,PACA_EXRFI+EX_R9(r13) |
| 1511 | std r10,PACA_EXRFI+EX_R10(r13) |
| 1512 | sync |
| 1513 | ld r9,PACA_EXRFI+EX_R9(r13) |
| 1514 | ld r10,PACA_EXRFI+EX_R10(r13) |
| 1515 | ori 31,31,0 |
| 1516 | .rept 14 |
| 1517 | b 1f |
| 1518 | 1: |
| 1519 | .endr |
| 1520 | blr |
| 1521 | |
| 1522 | TRAMP_REAL_BEGIN(rfi_flush_fallback) |
| 1523 | SET_SCRATCH0(r13); |
| 1524 | GET_PACA(r13); |
| 1525 | std r1,PACA_EXRFI+EX_R12(r13) |
| 1526 | ld r1,PACAKSAVE(r13) |
| 1527 | std r9,PACA_EXRFI+EX_R9(r13) |
| 1528 | std r10,PACA_EXRFI+EX_R10(r13) |
| 1529 | std r11,PACA_EXRFI+EX_R11(r13) |
| 1530 | mfctr r9 |
| 1531 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) |
| 1532 | ld r11,PACA_L1D_FLUSH_SIZE(r13) |
| 1533 | srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ |
| 1534 | mtctr r11 |
| 1535 | DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ |
| 1536 | |
| 1537 | /* order ld/st prior to dcbt stop all streams with flushing */ |
| 1538 | sync |
| 1539 | |
| 1540 | /* |
| 1541 | * The load addresses are at staggered offsets within cachelines, |
| 1542 | * which suits some pipelines better (on others it should not |
| 1543 | * hurt). |
| 1544 | */ |
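| | /* |
| | * A hedged C sketch of the displacement flush loop below (illustration |
| | * only): each iteration touches 8 cachelines, each load staggered a |
| | * further 8 bytes into its line: |
| | * |
| | *	char *p = fallback_area;	// PACA_RFI_FLUSH_FALLBACK_AREA |
| | *	long n = l1d_size / (128 * 8);	// PACA_L1D_FLUSH_SIZE, 8x unrolled |
| | *	while (n--) { |
| | *		for (int i = 0; i < 8; i++) |
| | *			(void)*(volatile unsigned long *)(p + i * (0x80 + 8)); |
| | *		p += 0x80 * 8; |
| | *	} |
| | */ |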
| 1545 | 1: |
| 1546 | ld r11,(0x80 + 8)*0(r10) |
| 1547 | ld r11,(0x80 + 8)*1(r10) |
| 1548 | ld r11,(0x80 + 8)*2(r10) |
| 1549 | ld r11,(0x80 + 8)*3(r10) |
| 1550 | ld r11,(0x80 + 8)*4(r10) |
| 1551 | ld r11,(0x80 + 8)*5(r10) |
| 1552 | ld r11,(0x80 + 8)*6(r10) |
| 1553 | ld r11,(0x80 + 8)*7(r10) |
| 1554 | addi r10,r10,0x80*8 |
| 1555 | bdnz 1b |
| 1556 | |
| 1557 | mtctr r9 |
| 1558 | ld r9,PACA_EXRFI+EX_R9(r13) |
| 1559 | ld r10,PACA_EXRFI+EX_R10(r13) |
| 1560 | ld r11,PACA_EXRFI+EX_R11(r13) |
| 1561 | ld r1,PACA_EXRFI+EX_R12(r13) |
| 1562 | GET_SCRATCH0(r13); |
| 1563 | rfid |
| 1564 | |
| 1565 | TRAMP_REAL_BEGIN(hrfi_flush_fallback) |
| 1566 | SET_SCRATCH0(r13); |
| 1567 | GET_PACA(r13); |
| 1568 | std r1,PACA_EXRFI+EX_R12(r13) |
| 1569 | ld r1,PACAKSAVE(r13) |
| 1570 | std r9,PACA_EXRFI+EX_R9(r13) |
| 1571 | std r10,PACA_EXRFI+EX_R10(r13) |
| 1572 | std r11,PACA_EXRFI+EX_R11(r13) |
| 1573 | mfctr r9 |
| 1574 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) |
| 1575 | ld r11,PACA_L1D_FLUSH_SIZE(r13) |
| 1576 | srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ |
| 1577 | mtctr r11 |
| 1578 | DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ |
| 1579 | |
| 1580 | /* order ld/st prior to dcbt stop all streams with flushing */ |
| 1581 | sync |
| 1582 | |
| 1583 | /* |
| 1584 | * The load addresses are at staggered offsets within cachelines, |
| 1585 | * which suits some pipelines better (on others it should not |
| 1586 | * hurt). |
| 1587 | */ |
| 1588 | 1: |
| 1589 | ld r11,(0x80 + 8)*0(r10) |
| 1590 | ld r11,(0x80 + 8)*1(r10) |
| 1591 | ld r11,(0x80 + 8)*2(r10) |
| 1592 | ld r11,(0x80 + 8)*3(r10) |
| 1593 | ld r11,(0x80 + 8)*4(r10) |
| 1594 | ld r11,(0x80 + 8)*5(r10) |
| 1595 | ld r11,(0x80 + 8)*6(r10) |
| 1596 | ld r11,(0x80 + 8)*7(r10) |
| 1597 | addi r10,r10,0x80*8 |
| 1598 | bdnz 1b |
| 1599 | |
| 1600 | mtctr r9 |
| 1601 | ld r9,PACA_EXRFI+EX_R9(r13) |
| 1602 | ld r10,PACA_EXRFI+EX_R10(r13) |
| 1603 | ld r11,PACA_EXRFI+EX_R11(r13) |
| 1604 | ld r1,PACA_EXRFI+EX_R12(r13) |
| 1605 | GET_SCRATCH0(r13); |
| 1606 | hrfid |
| 1607 | |
| 1608 | /* |
| 1609 | * Real mode exceptions actually use this too, but alternate |
| 1610 | * instruction code patches (which end up in the common .text area) |
| 1611 | * cannot reach these handlers if they are put there. |
| 1612 | */ |
| 1613 | USE_FIXED_SECTION(virt_trampolines) |
| 1614 | MASKED_INTERRUPT() |
| 1615 | MASKED_INTERRUPT(H) |
| 1616 | |
| 1617 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
| 1618 | TRAMP_REAL_BEGIN(kvmppc_skip_interrupt) |
| 1619 | /* |
| 1620 | * Here all GPRs are unchanged from when the interrupt happened |
| 1621 | * except for r13, which is saved in SPRG_SCRATCH0. |
| 1622 | */ |
| 1623 | mfspr r13, SPRN_SRR0 |
| 1624 | addi r13, r13, 4 |
| 1625 | mtspr SPRN_SRR0, r13 |
| 1626 | GET_SCRATCH0(r13) |
| 1627 | RFI_TO_KERNEL |
| 1628 | b . |
| 1629 | |
| 1630 | TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) |
| 1631 | /* |
| 1632 | * Here all GPRs are unchanged from when the interrupt happened |
| 1633 | * except for r13, which is saved in SPRG_SCRATCH0. |
| 1634 | */ |
| 1635 | mfspr r13, SPRN_HSRR0 |
| 1636 | addi r13, r13, 4 |
| 1637 | mtspr SPRN_HSRR0, r13 |
| 1638 | GET_SCRATCH0(r13) |
| 1639 | HRFI_TO_KERNEL |
| 1640 | b . |
| 1641 | #endif |
| 1642 | |
| 1643 | /* |
| 1644 | * Ensure that any handlers that get invoked from the exception prologs |
| 1645 | * above are below the first 64KB (0x10000) of the kernel image because |
| 1646 | * the prologs assemble the addresses of these handlers using the |
| 1647 | * LOAD_HANDLER macro, which uses an ori instruction. |
| 1648 | */ |
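| | /* |
| | * Why 64KB: LOAD_HANDLER materialises the handler address with an ori, |
| | * whose immediate is only 16 bits. A hedged sketch of the expansion |
| | * (see asm/exception-64s.h for the real macro): |
| | * |
| | *	ld	reg,PACAKBASE(r13)	// kernel base (upper bits) |
| | *	ori	reg,reg,(label)@l	// supplies only the low 16 bits |
| | */ |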
| 1649 | |
| 1650 | /*** Common interrupt handlers ***/ |
| 1651 | |
| 1652 | |
| 1653 | /* |
| 1654 | * Relocation-on interrupts: A subset of the interrupts can be delivered |
| 1655 | * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering |
| 1656 | * it. Addresses are the same as the original interrupt addresses, but |
| 1657 | * offset by 0xc000000000004000. |
| 1658 | * It's impossible to receive interrupts below 0x300 via this mechanism. |
| 1659 | * KVM: None of these traps are from the guest; anything that escalated |
| 1660 | * to HV=1 from HV=0 is delivered via real mode handlers. |
| 1661 | */ |
| 1662 | |
| 1663 | /* |
| 1664 | * This uses the standard macro, since the original 0x300 vector |
| 1665 | * only has extra guff for STAB-based processors -- which never |
| 1666 | * come here. |
| 1667 | */ |
| 1668 | |
| 1669 | EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline) |
| 1670 | b __ppc64_runlatch_on |
| 1671 | |
| 1672 | USE_FIXED_SECTION(virt_trampolines) |
| 1673 | /* |
| 1674 | * The __end_interrupts marker must be past the out-of-line (OOL) |
| 1675 | * handlers, so that they are copied to real address 0x100 when running |
| 1676 | * a relocatable kernel. This ensures they can be reached from the short |
| 1677 | * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch |
| 1678 | * directly, without using LOAD_HANDLER(). |
| 1679 | */ |
| 1680 | .align 7 |
| 1681 | .globl __end_interrupts |
| 1682 | __end_interrupts: |
| 1683 | DEFINE_FIXED_SYMBOL(__end_interrupts) |
| 1684 | |
| 1685 | #ifdef CONFIG_PPC_970_NAP |
| 1686 | EXC_COMMON_BEGIN(power4_fixup_nap) |
| 1687 | andc r9,r9,r10 |
| 1688 | std r9,TI_LOCAL_FLAGS(r11) |
| 1689 | ld r10,_LINK(r1) /* make idle task do the */ |
| 1690 | std r10,_NIP(r1) /* equivalent of a blr */ |
| 1691 | blr |
| 1692 | #endif |
| 1693 | |
| 1694 | CLOSE_FIXED_SECTION(real_vectors); |
| 1695 | CLOSE_FIXED_SECTION(real_trampolines); |
| 1696 | CLOSE_FIXED_SECTION(virt_vectors); |
| 1697 | CLOSE_FIXED_SECTION(virt_trampolines); |
| 1698 | |
| 1699 | USE_TEXT_SECTION() |
| 1700 | |
| 1701 | /* |
| 1702 | * Hash table stuff |
| 1703 | */ |
| 1704 | .balign IFETCH_ALIGN_BYTES |
| 1705 | do_hash_page: |
| 1706 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1707 | lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h |
| 1708 | ori r0,r0,DSISR_BAD_FAULT_64S@l |
| 1709 | and. r0,r4,r0 /* weird error? */ |
| 1710 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
| 1711 | CURRENT_THREAD_INFO(r11, r1) |
| 1712 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ |
| 1713 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ |
| 1714 | bne 77f /* then don't call hash_page now */ |
| 1715 | |
| 1716 | /* |
| 1717 | * r3 contains the faulting address |
| 1718 | * r4 contains the msr |
| 1719 | * r5 contains the trap number |
| 1720 | * r6 contains the dsisr |
| 1721 | * |
| 1722 | * On return, r3 = 0 for success, 1 for page fault, negative for error |
| 1723 | */ |
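| | /* |
| | * In C terms, the call below is roughly (a hedged sketch; the real |
| | * prototype lives in the hash MMU code): |
| | * |
| | *	long __hash_page(unsigned long ea, unsigned long msr, |
| | *			 unsigned long trap, unsigned long dsisr); |
| | */ |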
| 1724 | mr r4,r12 |
| 1725 | ld r6,_DSISR(r1) |
| 1726 | bl __hash_page /* build HPTE if possible */ |
| 1727 | cmpdi r3,0 /* see if __hash_page succeeded */ |
| 1728 | |
| 1729 | /* Success */ |
| 1730 | beq fast_exc_return_irq /* Return from exception on success */ |
| 1731 | |
| 1732 | /* Error */ |
| 1733 | blt- 13f |
| 1734 | |
| 1735 | /* Reload DSISR into r4 for the DABR check below */ |
| 1736 | ld r4,_DSISR(r1) |
| 1737 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
| 1738 | |
| 1739 | /* Here we have a page fault that hash_page can't handle. */ |
| 1740 | handle_page_fault: |
| 1741 | 11: andis. r0,r4,DSISR_DABRMATCH@h |
| 1742 | bne- handle_dabr_fault |
| 1743 | ld r4,_DAR(r1) |
| 1744 | ld r5,_DSISR(r1) |
| 1745 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 1746 | bl do_page_fault |
| 1747 | cmpdi r3,0 |
| 1748 | beq+ 12f |
| 1749 | bl save_nvgprs |
| 1750 | mr r5,r3 |
| 1751 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 1752 | ld r4,_DAR(r1) /* DAR is 64 bits wide */ |
| 1753 | bl bad_page_fault |
| 1754 | b ret_from_except |
| 1755 | |
| 1756 | /* We have a data breakpoint exception - handle it */ |
| 1757 | handle_dabr_fault: |
| 1758 | bl save_nvgprs |
| 1759 | ld r4,_DAR(r1) |
| 1760 | ld r5,_DSISR(r1) |
| 1761 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 1762 | bl do_break |
| 1763 | 12: b ret_from_except_lite |
| 1764 | |
| 1765 | |
| 1766 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1767 | /* |
| 1768 | * We have a page fault that hash_page could handle but HV refused |
| 1769 | * the PTE insertion. |
| 1770 | 13: bl save_nvgprs |
| 1771 | mr r5,r3 |
| 1772 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 1773 | ld r4,_DAR(r1) |
| 1774 | bl low_hash_fault |
| 1775 | b ret_from_except |
| 1776 | #endif |
| 1777 | |
| 1778 | /* |
| 1779 | * We come here as a result of a DSI at a point where we don't want |
| 1780 | * to call hash_page, such as when we are accessing memory (possibly |
| 1781 | * user memory) inside a PMU interrupt that occurred while interrupts |
| 1782 | * were soft-disabled. We want to invoke the exception handler for |
| 1783 | * the access, or panic if there isn't a handler. |
| 1784 | */ |
| 1785 | 77: bl save_nvgprs |
| 1786 | mr r4,r3 |
| 1787 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 1788 | li r5,SIGSEGV |
| 1789 | bl bad_page_fault |
| 1790 | b ret_from_except |
| 1791 | |
| 1792 | /* |
| 1793 | * Here we have detected that the kernel stack pointer is bad. |
| 1794 | * R9 contains the saved CR, r13 points to the paca, |
| 1795 | * r10 contains the (bad) kernel stack pointer, |
| 1796 | * r11 and r12 contain the saved SRR0 and SRR1. |
| 1797 | * We switch to using an emergency stack, save the registers there, |
| 1798 | * and call kernel_bad_stack(), which panics. |
| 1799 | */ |
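| | /* |
| | * The C endpoint is roughly (a hedged sketch; kernel_bad_stack() lives |
| | * in traps.c and does not return): |
| | * |
| | *	void kernel_bad_stack(struct pt_regs *regs) |
| | *	{ |
| | *		die("Bad kernel stack pointer", regs, SIGABRT); |
| | *	} |
| | */ |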
| 1800 | bad_stack: |
| 1801 | ld r1,PACAEMERGSP(r13) |
| 1802 | subi r1,r1,64+INT_FRAME_SIZE |
| 1803 | std r9,_CCR(r1) |
| 1804 | std r10,GPR1(r1) |
| 1805 | std r11,_NIP(r1) |
| 1806 | std r12,_MSR(r1) |
| 1807 | mfspr r11,SPRN_DAR |
| 1808 | mfspr r12,SPRN_DSISR |
| 1809 | std r11,_DAR(r1) |
| 1810 | std r12,_DSISR(r1) |
| 1811 | mflr r10 |
| 1812 | mfctr r11 |
| 1813 | mfxer r12 |
| 1814 | std r10,_LINK(r1) |
| 1815 | std r11,_CTR(r1) |
| 1816 | std r12,_XER(r1) |
| 1817 | SAVE_GPR(0,r1) |
| 1818 | SAVE_GPR(2,r1) |
| 1819 | ld r10,EX_R3(r3) |
| 1820 | std r10,GPR3(r1) |
| 1821 | SAVE_GPR(4,r1) |
| 1822 | SAVE_4GPRS(5,r1) |
| 1823 | ld r9,EX_R9(r3) |
| 1824 | ld r10,EX_R10(r3) |
| 1825 | SAVE_2GPRS(9,r1) |
| 1826 | ld r9,EX_R11(r3) |
| 1827 | ld r10,EX_R12(r3) |
| 1828 | ld r11,EX_R13(r3) |
| 1829 | std r9,GPR11(r1) |
| 1830 | std r10,GPR12(r1) |
| 1831 | std r11,GPR13(r1) |
| 1832 | BEGIN_FTR_SECTION |
| 1833 | ld r10,EX_CFAR(r3) |
| 1834 | std r10,ORIG_GPR3(r1) |
| 1835 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) |
| 1836 | SAVE_8GPRS(14,r1) |
| 1837 | SAVE_10GPRS(22,r1) |
| 1838 | lhz r12,PACA_TRAP_SAVE(r13) |
| 1839 | std r12,_TRAP(r1) |
| 1840 | addi r11,r1,INT_FRAME_SIZE |
| 1841 | std r11,0(r1) |
| 1842 | li r12,0 |
| 1843 | std r12,0(r11) |
| 1844 | ld r2,PACATOC(r13) |
| 1845 | ld r11,exception_marker@toc(r2) |
| 1846 | std r12,RESULT(r1) |
| 1847 | std r11,STACK_FRAME_OVERHEAD-16(r1) |
| 1848 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
| 1849 | bl kernel_bad_stack |
| 1850 | b 1b |
| 1851 | _ASM_NOKPROBE_SYMBOL(bad_stack); |
| 1852 | |
| 1853 | /* |
| 1854 | * When a doorbell is triggered from system reset wakeup, the message |
| 1855 | * is not cleared, so it would fire again when EE is enabled. |
| 1856 | * |
| 1857 | * When coming from local_irq_enable, there may be the same problem if |
| 1858 | * we were hard disabled. |
| 1859 | * |
| 1860 | * Execute msgclr to clear the pending message before handling it. |
| 1861 | */ |
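| | /* |
| | * Note on the shift below: the doorbell message type occupies bits |
| | * 32:36 of the msgclr operand in IBM (MSB=0) bit numbering, so it is |
| | * placed (63-36) = 27 bits up from the least-significant end. |
| | */ |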
| 1862 | h_doorbell_common_msgclr: |
| 1863 | LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36)) |
| 1864 | PPC_MSGCLR(3) |
| 1865 | b h_doorbell_common |
| 1866 | |
| 1867 | doorbell_super_common_msgclr: |
| 1868 | LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36)) |
| 1869 | PPC_MSGCLRP(3) |
| 1870 | b doorbell_super_common |
| 1871 | |
| 1872 | /* |
| 1873 | * Called from arch_local_irq_enable when an interrupt needs |
| 1874 | * to be resent. r3 contains 0x500, 0x900, 0xa00, 0xe60 or 0xf00 to |
| 1875 | * indicate which kind of interrupt (see the compares below). MSR:EE is |
| 1876 | * already off. We generate a stack frame as if a real interrupt had happened. |
| 1877 | * |
| 1878 | * Note: While MSR:EE is off, we need to make sure that _MSR |
| 1879 | * in the generated frame has EE set to 1 or the exception |
| 1880 | * handler will not properly re-enable interrupts. |
| 1881 | * |
| 1882 | * Note that we don't specify LR as the NIP (return address) for |
| 1883 | * the interrupt because that would unbalance the return branch |
| 1884 | * predictor. |
| 1885 | */ |
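| | /* |
| | * A hedged sketch of the C caller in the soft-irq enable path (see |
| | * irq.c; illustration only): |
| | * |
| | *	unsigned int replay = __check_irq_replay(); |
| | *	if (replay) |
| | *		__replay_interrupt(replay);	// vector number in r3 |
| | */ |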
| 1886 | _GLOBAL(__replay_interrupt) |
| 1887 | /* We are going to jump to the exception common code, which will |
| 1888 | * retrieve various register values from the PACA that we don't |
| 1889 | * give a damn about, so we don't bother storing them. |
| 1890 | */ |
| 1891 | mfmsr r12 |
| 1892 | LOAD_REG_ADDR(r11, replay_interrupt_return) |
| 1893 | mfcr r9 |
| 1894 | ori r12,r12,MSR_EE |
| 1895 | cmpwi r3,0x900 |
| 1896 | beq decrementer_common |
| 1897 | cmpwi r3,0x500 |
| 1898 | BEGIN_FTR_SECTION |
| 1899 | beq h_virt_irq_common |
| 1900 | FTR_SECTION_ELSE |
| 1901 | beq hardware_interrupt_common |
| 1902 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300) |
| 1903 | cmpwi r3,0xf00 |
| 1904 | beq performance_monitor_common |
| 1905 | BEGIN_FTR_SECTION |
| 1906 | cmpwi r3,0xa00 |
| 1907 | beq h_doorbell_common_msgclr |
| 1908 | cmpwi r3,0xe60 |
| 1909 | beq hmi_exception_common |
| 1910 | FTR_SECTION_ELSE |
| 1911 | cmpwi r3,0xa00 |
| 1912 | beq doorbell_super_common_msgclr |
| 1913 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) |
| 1914 | replay_interrupt_return: |
| 1915 | blr |
| 1916 | |
| 1917 | _ASM_NOKPROBE_SYMBOL(__replay_interrupt) |