/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1 0 /* Implied by stwu. */
#define HOST_CALLEE_LR 4
#define HOST_RUN 8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2 12
#define HOST_CR 16
#define HOST_NV_GPRS 20
#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
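
/*
 * The frame built by the stwu in __kvmppc_vcpu_run is thus, from the lowest
 * address up: the back chain (HOST_R1), the LR save slot for our callees
 * (HOST_CALLEE_LR), HOST_RUN, the host r2, the host CR, and the 18 host
 * nonvolatile GPRs r14-r31, with the total rounded up to a 16-byte
 * multiple.  HOST_STACK_LR is where we save our own LR: in the caller's
 * frame, just above ours.
 */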

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
			(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
			(1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
			(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
			(1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
		       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
		       (1<<BOOKE_INTERRUPT_PROGRAM) | \
		       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
		       (1<<BOOKE_INTERRUPT_ALIGNMENT))
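
/*
 * Each mask is a bitmap over exit numbers: kvmppc_resume_host computes
 * r6 = 1 << exit_nr and tests it against these masks with andi. to decide
 * whether the guest's last instruction, DEAR, or ESR must be captured
 * before the host can clobber them.  Every interrupt number used above is
 * below 16, so each mask fits in the 16-bit immediate of andi.
 */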

.macro __KVM_HANDLER ivor_nr scratch srr0
	/* Get pointer to vcpu and record exit number. */
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_GPR(R3)(r4)
	stw	r5, VCPU_GPR(R5)(r4)
	stw	r6, VCPU_GPR(R6)(r4)
	mfspr	r3, \scratch
	mfctr	r5
	stw	r3, VCPU_GPR(R4)(r4)
	stw	r5, VCPU_CTR(r4)
	mfspr	r3, \srr0
	lis	r6, kvmppc_resume_host@h
	stw	r3, VCPU_PC(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm
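
/*
 * On arrival at kvmppc_resume_host via the sequence above, r4 holds the
 * vcpu pointer and r5 the exit number; the guest's r3-r6, CTR and PC are
 * already saved in the vcpu, the guest r4 having been recovered from the
 * scratch SPRG named by \scratch.  KVM_HANDLER below wraps this in a
 * global entry point for each IVOR.
 */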

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm

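/*
 * The debug interrupt is special: it can fire not only in the guest but
 * also in the KVM entry/exit path itself, before the debug state switch
 * is complete.  CSRR1[PR] tells the two apart (the guest runs in problem
 * state): if the interrupt hit host code, clear MSR[DE] in CSRR1 and wipe
 * DBSR, then return with rfci; otherwise fall through to the common
 * handler.
 */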
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
	mtspr	\scratch, r4
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	stw	r3, VCPU_CRIT_SAVE(r4)
	mfcr	r3
	mfspr	r4, SPRN_CSRR1
	andi.	r4, r4, MSR_PR
	bne	1f
	/* debug interrupt happened in enter/exit path */
	mfspr	r4, SPRN_CSRR1
	rlwinm	r4, r4, 0, ~MSR_DE	/* clear MSR[DE] so it can't re-fire */
	mtspr	SPRN_CSRR1, r4
	lis	r4, 0xffff
	ori	r4, r4, 0xffff
	mtspr	SPRN_DBSR, r4		/* writing all 1s clears pending events */
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	mtcr	r3
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	rfci
1:	/* debug interrupt happened in guest */
	mtcr	r3
	mfspr	r4, SPRN_SPRG_THREAD
	lwz	r4, THREAD_KVM_VCPU(r4)
	lwz	r3, VCPU_CRIT_SAVE(r4)
	mfspr	r4, \scratch
	__KVM_HANDLER \ivor_nr \scratch \srr0
.endm

.macro KVM_HANDLER_ADDR ivor_nr
	.long	kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
	.long	kvmppc_handlers_end
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 * SPRG_SCRATCH0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(R7)(r4)
	stw	r8, VCPU_GPR(R8)(r4)
	stw	r9, VCPU_GPR(R9)(r4)

	li	r6, 1
	slw	r6, r6, r5	/* r6 = 1 << exit number, for the NEED_* masks */

#ifdef CONFIG_KVM_EXIT_TIMING
	/* Save exit time: read TBU, TBL, then TBU again; if the upper half
	 * changed, TBL wrapped between the reads, so retry until we have a
	 * consistent 64-bit timebase. */
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS	/* load through the guest's (AS1) data mappings */
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(R0)(r4)
	stw	r1, VCPU_GPR(R1)(r4)
	stw	r2, VCPU_GPR(R2)(r4)
	stw	r10, VCPU_GPR(R10)(r4)
	stw	r11, VCPU_GPR(R11)(r4)
	stw	r12, VCPU_GPR(R12)(r4)
	stw	r13, VCPU_GPR(R13)(r4)
	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* we cheat and know that Linux doesn't use PID1 which is always 0 */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	mr	r3, r4		/* vcpu pointer is the first argument */
	lwz	r2, HOST_R2(r1)
	mr	r14, r4		/* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

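	/*
	 * kvmppc_handle_exit() (roughly int kvmppc_handle_exit(struct
	 * kvm_vcpu *vcpu, unsigned int exit_nr) on this kernel) returns a
	 * RESUME_* code in r3: RESUME_FLAG_NV requests a full nonvolatile
	 * reload, RESUME_FLAG_HOST means exit to the host, and any error
	 * value rides in the upper bits, shifted left by 2 (undone by the
	 * srawi below).
	 */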
	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(R14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2	/* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(R15)(r4)
	stw	r16, VCPU_GPR(R16)(r4)
	stw	r17, VCPU_GPR(R17)(r4)
	stw	r18, VCPU_GPR(R18)(r4)
	stw	r19, VCPU_GPR(R19)(r4)
	stw	r20, VCPU_GPR(R20)(r4)
	stw	r21, VCPU_GPR(R21)(r4)
	stw	r22, VCPU_GPR(R22)(r4)
	stw	r23, VCPU_GPR(R23)(r4)
	stw	r24, VCPU_GPR(R24)(r4)
	stw	r25, VCPU_GPR(R25)(r4)
	stw	r26, VCPU_GPR(R26)(r4)
	stw	r27, VCPU_GPR(R27)(r4)
	stw	r28, VCPU_GPR(R28)(r4)
	stw	r29, VCPU_GPR(R29)(r4)
	stw	r30, VCPU_GPR(R30)(r4)
	stw	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(R14)(r1)
	lwz	r15, HOST_NV_GPR(R15)(r1)
	lwz	r16, HOST_NV_GPR(R16)(r1)
	lwz	r17, HOST_NV_GPR(R17)(r1)
	lwz	r18, HOST_NV_GPR(R18)(r1)
	lwz	r19, HOST_NV_GPR(R19)(r1)
	lwz	r20, HOST_NV_GPR(R20)(r1)
	lwz	r21, HOST_NV_GPR(R21)(r1)
	lwz	r22, HOST_NV_GPR(R22)(r1)
	lwz	r23, HOST_NV_GPR(R23)(r1)
	lwz	r24, HOST_NV_GPR(R24)(r1)
	lwz	r25, HOST_NV_GPR(R25)(r1)
	lwz	r26, HOST_NV_GPR(R26)(r1)
	lwz	r27, HOST_NV_GPR(R27)(r1)
	lwz	r28, HOST_NV_GPR(R28)(r1)
	lwz	r29, HOST_NV_GPR(R29)(r1)
	lwz	r30, HOST_NV_GPR(R30)(r1)
	lwz	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	lwz	r5, HOST_CR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	mtcr	r5
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 * r3: vcpu pointer
 */
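/*
 * C-side prototype (approximately): int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 * returns, in r3, the value computed by the exit path above.
 */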
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	mr	r4, r3		/* vcpu pointer lives in r4 from here on */
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)
	mfcr	r5
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(R14)(r1)
	stw	r15, HOST_NV_GPR(R15)(r1)
	stw	r16, HOST_NV_GPR(R16)(r1)
	stw	r17, HOST_NV_GPR(R17)(r1)
	stw	r18, HOST_NV_GPR(R18)(r1)
	stw	r19, HOST_NV_GPR(R19)(r1)
	stw	r20, HOST_NV_GPR(R20)(r1)
	stw	r21, HOST_NV_GPR(R21)(r1)
	stw	r22, HOST_NV_GPR(R22)(r1)
	stw	r23, HOST_NV_GPR(R23)(r1)
	stw	r24, HOST_NV_GPR(R24)(r1)
	stw	r25, HOST_NV_GPR(R25)(r1)
	stw	r26, HOST_NV_GPR(R26)(r1)
	stw	r27, HOST_NV_GPR(R27)(r1)
	stw	r28, HOST_NV_GPR(R28)(r1)
	stw	r29, HOST_NV_GPR(R29)(r1)
	stw	r30, HOST_NV_GPR(R30)(r1)
	stw	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(R14)(r4)
	lwz	r15, VCPU_GPR(R15)(r4)
	lwz	r16, VCPU_GPR(R16)(r4)
	lwz	r17, VCPU_GPR(R17)(r4)
	lwz	r18, VCPU_GPR(R18)(r4)
	lwz	r19, VCPU_GPR(R19)(r4)
	lwz	r20, VCPU_GPR(R20)(r4)
	lwz	r21, VCPU_GPR(R21)(r4)
	lwz	r22, VCPU_GPR(R22)(r4)
	lwz	r23, VCPU_GPR(R23)(r4)
	lwz	r24, VCPU_GPR(R24)(r4)
	lwz	r25, VCPU_GPR(R25)(r4)
	lwz	r26, VCPU_GPR(R26)(r4)
	lwz	r27, VCPU_GPR(R27)(r4)
	lwz	r28, VCPU_GPR(R28)(r4)
	lwz	r29, VCPU_GPR(R29)(r4)
	lwz	r30, VCPU_GPR(R30)(r4)
	lwz	r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(R0)(r4)
	lwz	r2, VCPU_GPR(R2)(r4)
	lwz	r9, VCPU_GPR(R9)(r4)
	lwz	r10, VCPU_GPR(R10)(r4)
	lwz	r11, VCPU_GPR(R11)(r4)
	lwz	r12, VCPU_GPR(R12)(r4)
	lwz	r13, VCPU_GPR(R13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	lwz	r5, VCPU_SHARED(r4)

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(R1)(r4)

	/*
	 * Host interrupt handlers may have clobbered these
	 * guest-readable SPRGs, or the guest kernel may have
	 * written directly to the shared area, so we
	 * need to reload them here with the guest's values.
	 */
	PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
	mtspr	SPRN_SPRG4W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
	mtspr	SPRN_SPRG5W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
	mtspr	SPRN_SPRG6W, r3
	PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* Save enter time: same TBU/TBL/TBU read loop as on exit, retrying
	 * until the upper half of the timebase is stable. */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(R5)(r4)
	lwz	r6, VCPU_GPR(R6)(r4)
	lwz	r7, VCPU_GPR(R7)(r4)
	lwz	r8, VCPU_GPR(R8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(R3)(r4)
	lwz	r4, VCPU_GPR(R4)(r4)
	rfi

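/*
 * Table of handler entry addresses, one .long per handler instantiated
 * above, terminated by the kvmppc_handlers_end marker; the C side of booke
 * KVM presumably uses it to locate each stub and size the handler region.
 */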
.data
.align	4
.globl	kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this entry last. */

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0,r3,0		/* bail if there is no vcpu */
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	/* There is no direct move from the SPE accumulator, so zero evr6 and
	 * multiply-accumulate 0*0: the result written back equals ACC. */
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4,VCPU_ACC
	evstddx	evr6, r4, r3	/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0,r3,0		/* bail if there is no vcpu */
	beqlr-
	li	r4,VCPU_ACC
	evlddx	evr6,r4,r3
	evmra	evr6,evr6	/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif