/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Boot code and exception vectors for Book3E processors
 *
 * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>

/* XXX This will ultimately add space for a special exception save
 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 * when taking special interrupts. For now we don't support that;
 * special interrupts from within a non-standard level will probably
 * blow you up.
 */
#define	SPECIAL_EXC_SRR0	0
#define	SPECIAL_EXC_SRR1	1
#define	SPECIAL_EXC_SPRG_GEN	2
#define	SPECIAL_EXC_SPRG_TLB	3
#define	SPECIAL_EXC_MAS0	4
#define	SPECIAL_EXC_MAS1	5
#define	SPECIAL_EXC_MAS2	6
#define	SPECIAL_EXC_MAS3	7
#define	SPECIAL_EXC_MAS6	8
#define	SPECIAL_EXC_MAS7	9
#define	SPECIAL_EXC_MAS5	10	/* E.HV only */
#define	SPECIAL_EXC_MAS8	11	/* E.HV only */
#define	SPECIAL_EXC_IRQHAPPENED	12
#define	SPECIAL_EXC_DEAR	13
#define	SPECIAL_EXC_ESR		14
#define	SPECIAL_EXC_SOFTE	15
#define	SPECIAL_EXC_CSRR0	16
#define	SPECIAL_EXC_CSRR1	17
/* must be even to keep 16-byte stack alignment */
#define	SPECIAL_EXC_END		18

#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)

#define SPECIAL_EXC_STORE(reg, name) \
	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

#define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
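
/*
 * For illustration only (not used by the code below): the save area
 * addressed by the SPECIAL_EXC_* indices can be pictured as a hypothetical
 * C struct living at SPECIAL_EXC_FRAME_OFFS above r1. The struct and field
 * names are assumptions made for this sketch, not kernel definitions.
 *
 *	struct special_exc_save {
 *		u64 srr0, srr1;			// SPECIAL_EXC_SRR0/1
 *		u64 sprg_gen, sprg_tlb;		// scratch SPRGs
 *		u64 mas0, mas1, mas2, mas3;
 *		u64 mas6, mas7;
 *		u64 mas5, mas8;			// E.HV only
 *		u64 irqhappened;
 *		u64 dear, esr;
 *		u64 softe;
 *		u64 csrr0, csrr1;		// 18 slots: even count keeps
 *	};					// 16-byte stack alignment
 *
 *	// SPECIAL_EXC_STORE(r10, MAS2) is then morally:
 *	//   ((struct special_exc_save *)(r1 + SPECIAL_EXC_FRAME_OFFS))->mas2 = r10;
 */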

special_reg_save:
	lbz	r9,PACAIRQHAPPENED(r13)
	RECONCILE_IRQ_STATE(r3,r4)

	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.
	 */
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	bnelr

	/*
	 * Advance to the next TLB exception frame for handler
	 * types that don't do it automatically.
	 */
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	add	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * Save registers needed to allow nesting of certain exceptions
	 * (such as TLB misses) inside special exception levels
	 */
	mfspr	r10,SPRN_SRR0
	SPECIAL_EXC_STORE(r10,SRR0)
	mfspr	r10,SPRN_SRR1
	SPECIAL_EXC_STORE(r10,SRR1)
	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_GEN)
	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_TLB)
	mfspr	r10,SPRN_MAS0
	SPECIAL_EXC_STORE(r10,MAS0)
	mfspr	r10,SPRN_MAS1
	SPECIAL_EXC_STORE(r10,MAS1)
	mfspr	r10,SPRN_MAS2
	SPECIAL_EXC_STORE(r10,MAS2)
	mfspr	r10,SPRN_MAS3
	SPECIAL_EXC_STORE(r10,MAS3)
	mfspr	r10,SPRN_MAS6
	SPECIAL_EXC_STORE(r10,MAS6)
	mfspr	r10,SPRN_MAS7
	SPECIAL_EXC_STORE(r10,MAS7)
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_MAS5
	SPECIAL_EXC_STORE(r10,MAS5)
	mfspr	r10,SPRN_MAS8
	SPECIAL_EXC_STORE(r10,MAS8)

	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
	li	r10,0
	mtspr	SPRN_MAS5,r10
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	SPECIAL_EXC_STORE(r9,IRQHAPPENED)

	mfspr	r10,SPRN_DEAR
	SPECIAL_EXC_STORE(r10,DEAR)
	mfspr	r10,SPRN_ESR
	SPECIAL_EXC_STORE(r10,ESR)

	lbz	r10,PACAIRQSOFTMASK(r13)
	SPECIAL_EXC_STORE(r10,SOFTE)
	ld	r10,_NIP(r1)
	SPECIAL_EXC_STORE(r10,CSRR0)
	ld	r10,_MSR(r1)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr

ret_from_level_except:
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f
	b	ret_from_except
1:

	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	sub	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * It's possible that the special level exception interrupted a
	 * TLB miss handler, and inserted the same entry that the
	 * interrupted handler was about to insert. On CPUs without TLB
	 * write conditional, this can result in a duplicate TLB entry.
	 * Wipe all non-bolted entries to be safe.
	 *
	 * Note that this doesn't protect against any TLB misses
	 * we may take accessing the stack from here to the end of
	 * the special level exception. It's not clear how we can
	 * reasonably protect against that, but only CPUs with
	 * neither TLB write conditional nor bolted kernel memory
	 * are affected. Do any such CPUs even exist?
	 */
	PPC_TLBILX_ALL(0,R0)

	REST_NVGPRS(r1)

	SPECIAL_EXC_LOAD(r10,SRR0)
	mtspr	SPRN_SRR0,r10
	SPECIAL_EXC_LOAD(r10,SRR1)
	mtspr	SPRN_SRR1,r10
	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,MAS0)
	mtspr	SPRN_MAS0,r10
	SPECIAL_EXC_LOAD(r10,MAS1)
	mtspr	SPRN_MAS1,r10
	SPECIAL_EXC_LOAD(r10,MAS2)
	mtspr	SPRN_MAS2,r10
	SPECIAL_EXC_LOAD(r10,MAS3)
	mtspr	SPRN_MAS3,r10
	SPECIAL_EXC_LOAD(r10,MAS6)
	mtspr	SPRN_MAS6,r10
	SPECIAL_EXC_LOAD(r10,MAS7)
	mtspr	SPRN_MAS7,r10
BEGIN_FTR_SECTION
	SPECIAL_EXC_LOAD(r10,MAS5)
	mtspr	SPRN_MAS5,r10
	SPECIAL_EXC_LOAD(r10,MAS8)
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	lbz	r6,PACAIRQSOFTMASK(r13)
	ld	r5,SOFTE(r1)

	/* Interrupts had better not already be enabled... */
	tweqi	r6,IRQS_ENABLED

	andi.	r6,r5,IRQS_DISABLED
	bne	1f

	TRACE_ENABLE_INTS
	stb	r5,PACAIRQSOFTMASK(r13)
1:
	/*
	 * Restore PACAIRQHAPPENED rather than setting it based on
	 * the return MSR[EE], since we could have interrupted
	 * __check_irq_replay() or other inconsistent transitory
	 * states that must remain that way.
	 */
	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
	stb	r10,PACAIRQHAPPENED(r13)

	SPECIAL_EXC_LOAD(r10,DEAR)
	mtspr	SPRN_DEAR,r10
	SPECIAL_EXC_LOAD(r10,ESR)
	mtspr	SPRN_ESR,r10

	stdcx.	r0,0,r1		/* to clear the reservation */

	REST_4GPRS(2, r1)
	REST_4GPRS(6, r1)

	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtctr	r10
	mtxer	r11

	blr

.macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

	ld	r10,_LINK(r1)
	ld	r11,_CCR(r1)
	ld	r0,GPR13(r1)
	mtlr	r10
	mtcr	r11

	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	\scratch,r0

	std	r10,\paca_ex+EX_R10(r13);
	std	r11,\paca_ex+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	\srr0,r10
	mtspr	\srr1,r11
	ld	r10,\paca_ex+EX_R10(r13)
	ld	r11,\paca_ex+EX_R11(r13)
	mfspr	r13,\scratch
.endm

ret_from_crit_except:
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci

ret_from_mc_except:
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci

/* Exception prolog code for all exceptions */
#define EXCEPTION_PROLOG(n, intnum, type, addition)			    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;	/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13);	/* save old r1 in the PACA */ \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	type##_BTB_FLUSH						    \
	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */

/* Exception type-specific macros */
#define	GEN_SET_KSTACK						\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK						\
	ld	r1,PACA_CRIT_STACK(r13);			\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK						\
	ld	r1,PACA_DBG_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK						\
	ld	r1,PACA_MC_STACK(r13);				\
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

#ifdef CONFIG_PPC_FSL_BOOK3E
#define GEN_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		beq 1f;			\
		BTB_FLUSH(r10)		\
		1:			\
	END_BTB_FLUSH_SECTION

#define CRIT_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		BTB_FLUSH(r10)		\
	END_BTB_FLUSH_SECTION

#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
#else
#define GEN_BTB_FLUSH
#define CRIT_BTB_FLUSH
#define DBG_BTB_FLUSH
#define MC_BTB_FLUSH
#define GDBELL_BTB_FLUSH
#endif

#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
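
/*
 * As a worked example (purely illustrative), the data storage handler's
 * NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
 * PROLOG_ADDITION_2REGS) expands, via token pasting, to
 * EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, GEN,
 * PROLOG_ADDITION_2REGS_GEN(0x300)), which in turn selects
 * SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN, SPRN_GEN_SRR0/1 (= SPRN_SRR0/1)
 * and GEN_SET_KSTACK for the prolog body above.
 */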

/* Variants of the "addition" argument for the prolog
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */	    \
	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
	bne	masked_interrupt_book3e_##n

#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)


/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
exc_##n##_common:							    \
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
	ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */  \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,scratch;		/* get back r13 */		    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
	ld	r12,exception_marker@toc(r2);				    \
	li	r0,0;							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n)+1;		/* indicate partial regs in trap */ \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store stack frame back link */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */

#define EXCEPTION_COMMON(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
#define EXCEPTION_COMMON_CRIT(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
#define EXCEPTION_COMMON_MC(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
#define EXCEPTION_COMMON_DBG(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/*
 * This is meant for exceptions that don't immediately hard-enable. We
 * set a bit in paca->irq_happened to ensure that a subsequent call to
 * arch_local_irq_restore() will properly hard-enable and avoid the
 * fast-path, and then reconcile irq state.
 */
#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)
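
/*
 * A rough sketch of what this amounts to, assuming the usual
 * RECONCILE_IRQ_STATE semantics (see the real macro in asm/hw_irq.h
 * for the authoritative definition):
 *
 *	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 *	local_paca->irq_soft_mask = IRQS_DISABLED;
 */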

/*
 * This is called by exceptions that don't use INTS_DISABLE (that did not
 * touch irq indicators in the PACA). This will restore MSR:EE to its
 * previous value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 * load from the wrtee, thus limiting the latency caused by the dependency,
 * but at this point, I'll favor code clarity until we have a near-final
 * implementation
 */
#define INTS_RESTORE_HARD						    \
	ld	r11,_MSR(r1);						    \
	wrtee	r11;

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 *	the debug exception handler which handles single stepping
 *	into exceptions from userspace, and the MM code in
 *	arch/powerpc/mm/tlb_nohash.c which patches the branch here
 *	and would need to be updated if that branch is moved
 */
#define	EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r
/* Used by asynchronous interrupts that may happen in the idle loop.
 *
 * This checks whether the thread was in the idle loop, and if so, returns
 * to the caller rather than to the interrupted PC. This is to avoid a race
 * if interrupts happen before the wait instruction.
 */
#define CHECK_NAPPING()							\
	ld	r11, PACA_THREAD_INFO(r13);				\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:
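
/*
 * In C terms, CHECK_NAPPING() is approximately (an illustrative sketch
 * only, not kernel C):
 *
 *	if (current_thread_info()->local_flags & _TLF_NAPPING) {
 *		current_thread_info()->local_flags &= ~_TLF_NAPPING;
 *		regs->nip = regs->link;	// return to the idle loop's caller
 *	}
 */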


#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum)					\
	INTS_DISABLE;							\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret_from_except_lite;

/* This value is used to mark exception frames on the stack. */
	.section	".toc","aw"
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER


/*
 * And here we have the exception vectors !
 */

	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x200, altivec_unavailable)
	EXCEPTION_STUB(0x220, altivec_assist)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)
	EXCEPTION_STUB(0x340, lrat_error)

	.globl __end_interrupts
__end_interrupts:

/* Critical Input Interrupt */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x100)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Machine Check Interrupt */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_MC(0x000)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_mc_except

/* Data Storage Interrupt */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x300)
	INTS_DISABLE
	b	storage_fault_common

/* Instruction Storage Interrupt */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	EXCEPTION_COMMON(0x400)
	INTS_DISABLE
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x600)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	EXCEPTION_COMMON(0x700)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	bl	save_nvgprs
	bl	program_check_exception
	b	ret_from_except

/* Floating Point Unavailable Interrupt */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_fpu
	b	fast_exception_return
1:	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	b	ret_from_except

/* Altivec Unavailable Interrupt */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#else
	bl	unknown_exception
#endif
	b	ret_from_except


/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x9f0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
	bl	WatchdogException
#else
	bl	unknown_exception
#endif
	b	ret_from_crit_except

/* System Call Interrupt */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except

/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE		/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     yet properly save an interrupted kernel state
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash things up to make it look like we are coming in on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_CRIT(0xd00)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

kernel_dbg_exc:
	b	.	/* NYI */

/* Debug exception as a debug interrupt */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE		/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     yet properly save an interrupted kernel state
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash things up to make it look like we are coming in on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_DBG(0xd08)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260)
	INTS_DISABLE
	CHECK_NAPPING()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	performance_monitor_exception
	b	ret_from_except_lite

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2a0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/*
 * Guest doorbell interrupt
 * This general exception uses the GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Guest Doorbell critical Interrupt */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2e0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Embedded Hypervisor privileged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* LRAT Error interrupt */
	START_EXCEPTION(lrat_error);
	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x340)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/*
 * An interrupt came in while soft-disabled; we mark paca->irq_happened
 * accordingly and, if the interrupt is level sensitive, we hard disable.
 * Hard disabling (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 * keep these in sync.
 */

.macro masked_interrupt_book3e paca_irq full_mask
	lbz	r10,PACAIRQHAPPENED(r13)
	.if \full_mask == 1
	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
	.else
	ori	r10,r10,\paca_irq
	.endif
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
	rldicl	r10,r11,48,1		/* clear MSR_EE */
	rotldi	r11,r10,16
	mtspr	SPRN_SRR1,r11
	.endif

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm
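
/*
 * A sketch of the macro's bookkeeping in C-like pseudocode (illustration
 * only, not kernel C):
 *
 *	local_paca->irq_happened |=
 *		paca_irq | (full_mask ? PACA_IRQ_HARD_DIS : 0);
 *	if (full_mask)
 *		srr1 &= ~MSR_EE;	// the rldicl/rotldi pair above
 *	// then restore CR/r10/r11/r13 from PACA_EXGEN and rfi back
 */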

masked_interrupt_book3e_0x500:
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	ACK_FIT(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0

/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
storage_fault_common:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	mr	r5,r15
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	do_page_fault
	cmpdi	r3,0
	bne-	1f
	b	ret_from_except_lite
1:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except
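
/*
 * The flow above, as hedged C pseudocode (a sketch of the call sequence,
 * not the actual C prototypes):
 *
 *	err = do_page_fault(regs, dar, dsisr);
 *	if (err) {
 *		save_nvgprs(regs);
 *		bad_page_fault(regs, dar, err);
 *	}
 */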

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
alignment_more:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	alignment_exception
	b	ret_from_except

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	wrteei	0

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
	ld	r10,PACACURRENT(r13)
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:
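
/*
 * What the _TIF_EMULATE_STACK_STORE block above does, in rough C
 * (illustrative sketch only; names are assumptions):
 *
 *	new = regs->gpr[1] - INT_FRAME_SIZE;	// trampoline frame
 *	memcpy(new, current_frame, INT_FRAME_SIZE);
 *	*(u64 *)regs->gpr[1] = pre_stdu_sp;	// complete the emulated stdu
 *	clear_bit(TIF_EMULATE_STACK_STORE, &ti->flags);  // atomically
 */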

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we should disable interrupts when
	 * we return from the interrupt, so that we don't get interrupted
	 * after loading SRR0/1.
	 */
	wrteei	0
#endif /* CONFIG_PREEMPT */

restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,fast_exception_return

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

/* This is the return from load_up_fpu fast path which could do with
 * less GPR restores in fact, but for now we have a single return path
 */
fast_exception_return:
	wrteei	0
1:	mr	r0,r13
	ld	r10,_MSR(r1)
	REST_4GPRS(2, r1)
	andi.	r6,r10,MSR_PR
	REST_2GPRS(6, r1)
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
	ld	r0,GPR13(r1)

1:	stdcx.	r0,0,r1		/* to clear the reservation */

	ld	r8,_CCR(r1)
	ld	r9,_LINK(r1)
	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtcr	r8
	mtlr	r9
	mtctr	r10
	mtxer	r11
	REST_2GPRS(8, r1)
	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r0

	std	r10,PACA_EXGEN+EX_R10(r13);
	std	r11,PACA_EXGEN+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi

/*
 * We are returning to a context with interrupts soft disabled.
 *
 * However, we may also be about to hard enable, so we need to
 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
 * or that bit can get out of sync and bad things will happen
 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	fast_exception_return

/*
 * Something did happen, check if a re-emit is needed
 * (this also clears paca->irq_happened)
 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */
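
/*
 * The replay dispatch above is, in effect (an illustrative sketch):
 *
 *	switch (__check_irq_replay()) {
 *	case 0x500: do_IRQ(regs); break;
 *	case 0x900: timer_interrupt(regs); break;
 *	case 0x280: doorbell_exception(regs); break; // CONFIG_PPC_DOORBELL
 *	}
 */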

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);

/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 */
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
BAD_STACK_TRAMPOLINE(0x220)
BAD_STACK_TRAMPOLINE(0x260)
BAD_STACK_TRAMPOLINE(0x280)
BAD_STACK_TRAMPOLINE(0x2a0)
BAD_STACK_TRAMPOLINE(0x2c0)
BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
BAD_STACK_TRAMPOLINE(0x700)
BAD_STACK_TRAMPOLINE(0x800)
BAD_STACK_TRAMPOLINE(0x900)
BAD_STACK_TRAMPOLINE(0x980)
BAD_STACK_TRAMPOLINE(0x9f0)
BAD_STACK_TRAMPOLINE(0xa00)
BAD_STACK_TRAMPOLINE(0xb00)
BAD_STACK_TRAMPOLINE(0xc00)
BAD_STACK_TRAMPOLINE(0xd00)
BAD_STACK_TRAMPOLINE(0xd08)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)

	.globl	bad_stack_book3e
bad_stack_book3e:
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13)	/* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13)	/* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DAR(r1)
	std	r11,_DSISR(r1)
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_10GPRS(14,r1)
	SAVE_8GPRS(24,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b

/*
 * Setup the initial TLB for a core. This current implementation
 * assumes that whatever we are running off will not conflict with
 * the new mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES
	bne	have_hes

	mflr	r8		/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bl	invstr		/* Find our address */
invstr:	mflr	r6		/* Make it accessible */
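	/*
	 * The bl/mflr pair is the usual PowerPC position-independent
	 * trick: "bl invstr" branches to the very next instruction while
	 * depositing that instruction's address into LR, so the mflr
	 * above yields our current runtime address regardless of where
	 * this code was loaded.
	 */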
	mfmsr	r7
	rlwinm	r5,r7,27,31,31	/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6		/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31	/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1	/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY	/* Extract # entries */
	li	r6,0			/* Set Entry counter to 0 */
1:	mr	r7,r3			/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv			/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r4			/* Are we done? */
	bne	1b			/* If not, repeat */

| 1487 | /* Invalidate all TLBs */ |
| 1488 | PPC_TLBILX_ALL(0,R0) |
| 1489 | sync |
| 1490 | isync |
| 1491 | |
/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Pick a non-zero entry that isn't the one in use */
	addi	r7,r7,0x1
	mr	r4,r3		/* Copy MAS0(TLBSEL) from the entry we're running in */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other address space */
	mtspr	SPRN_MAS1,r6

	tlbwe

	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

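/*
 * The dance above moves us into the other address space: the
 * tlbre/tlbwe pair duplicates our current mapping into a spare entry
 * with MAS1[TS] flipped, SRR1 gets MSR with MSR[IS] toggled to match,
 * and the rfi both switches instruction spaces and lands us at label
 * 2. From there the original mapping can be torn down safely.
 */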
/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,31	/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r6
	tlbwe
	sync
	isync

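/*
 * The sync/isync after each tlbwe is the required context
 * synchronization: sync makes the TLB update globally visible and
 * isync discards any instructions already fetched under the old
 * translation.
 */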
/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r6

	rlwinm	r5,r5,0,0,25
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe

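/*
 * This builds the permanent linear mapping: entry 0 of the array
 * (ESEL cleared), valid and IPROT-protected, 1GB page size, EPN =
 * PAGE_OFFSET. The RPN is recycled from the MAS3 value noted in r5
 * above, with the low permission bits masked off and replaced by
 * supervisor R/W/X, so PAGE_OFFSET ends up mapping the memory we
 * booted from.
 */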
/* 7. Jump to KERNELBASE mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 */
	/* Now we branch to the new virtual address mapped by this entry */
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	tovirt(r6,r6)
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */
2:

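/*
 * Same rfi trick as step 3, except that this time SRR0 is run through
 * tovirt() so we land inside the PAGE_OFFSET mapping, and SRR1 carries
 * a full MSR_KERNEL (which has MSR[IS] = 0, i.e. back in address
 * space 0).
 */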
/* 8. Clear out the temp mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,2,31	/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r5
	tlbwe
	sync
	isync

	/* We translate LR and return */
	tovirt(r8,r8)
	mtlr	r8
	blr

have_hes:
	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
	 * kernel linear mapping. We also set MAS8 once and for all here,
	 * though that will eventually have to be made dependent on
	 * whether we are running under a hypervisor.
	 */

/* BEWARE, MAGIC
 * This code is called as an ordinary function on the boot CPU. But to
 * avoid duplication, this code is also used in SCOM bringup of
 * secondary CPUs. We read the code between the initial_tlb_code_start
 * and initial_tlb_code_end labels one instruction at a time and RAM it
 * into the new core via SCOM. That doesn't process branches, so there
 * must be none between those two labels. It also means if this code
 * ever takes any parameters, the SCOM code must also be updated to
 * provide them.
 */
	.globl a2_tlbinit_code_start
a2_tlbinit_code_start:

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
	mtspr	SPRN_MAS0,r11
	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
	mtspr	SPRN_MAS2,r3
	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS7_MAS3,r3
	li	r3,0
	mtspr	SPRN_MAS8,r3

	/* Write the TLB entry */
	tlbwe

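/*
 * On these HES-capable cores MAS7 and MAS3 are accessed through the
 * combined SPRN_MAS7_MAS3 SPR; loading it with only the permission
 * bits leaves RPN = 0, so PAGE_OFFSET maps physical address 0 here.
 * Clearing MAS8 (the guest-state/LPID fields) matches the bare-metal
 * assumption called out in the comment above.
 */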
	.globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:

	/* Now we branch to the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
	mtctr	r3
	bctr

1:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
	 * else (including IPROTed things left by firmware)
	 * r4 = TLBnCFG
	 * r3 = current address (more or less)
	 */

	li	r5,0
	mtspr	SPRN_MAS6,r5
	tlbsx	0,r3

	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY
	rlwinm	r10,r4,8,0xff
	addi	r10,r10,-1	/* Get inner loop mask */

	li	r3,1

	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))

	mfspr	r6,SPRN_MAS2
	rldicr	r6,r6,0,51	/* Extract EPN */

	mfspr	r7,SPRN_MAS0
	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */

	rlwinm	r8,r7,16,0xfff	/* Extract ESEL */

2:	add	r4,r3,r8
	and	r4,r4,r10

	rlwimi	r7,r4,16,MAS0_ESEL_MASK

	mtspr	SPRN_MAS0,r7
	mtspr	SPRN_MAS1,r5
	mtspr	SPRN_MAS2,r6
	tlbwe

	addi	r3,r3,1
	and.	r4,r3,r10

	bne	3f
	addis	r6,r6,(1<<30)@h
3:
	cmpw	r3,r9
	blt	2b

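/*
 * A few notes on the flush loop: r10 is the associativity of the
 * array minus one (TLBnCFG[ASSOC] lives in the top byte, hence the
 * rotate by 8), so the "and" with r10 cycles through the ways,
 * starting one way past the entry we are running in. Each time the
 * way index wraps, the addis bumps the EPN by 1GB so different sets
 * are walked as well, and the loop stops once r9 = N_ENTRY entries
 * have been written back invalid.
 */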
	.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:

	PPC_TLBILX(0,0,R0)
	sync
	isync

	.globl a2_tlbinit_code_end
a2_tlbinit_code_end:

	/* We translate LR and return */
	mflr	r3
	tovirt(r3,r3)
	mtlr	r3
	blr

/*
 * Main entry (boot CPU, thread 0)
 *
 * We enter here from head_64.S, possibly after the prom_init trampoline,
 * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit
 * mode. Anything else is as it was left by the bootloader.
 *
 * Initial requirements of this port:
 *
 * - Kernel loaded at 0 physical
 * - A good lump of memory mapped 0:0 by UTLB entry 0
 * - MSR:IS & MSR:DS set to 0
 *
 * Note that some of the above requirements will be relaxed in the future
 * as the kernel becomes smarter at dealing with different initial
 * conditions, but for now you have to be careful.
 */
_GLOBAL(start_initialization_book3e)
	mflr	r28

	/* First, we need to set up some initial TLBs to map the kernel
	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
	 * and always use AS 0, so we just set it up to match our link
	 * address and never use 0 based addresses.
	 */
	bl	initial_tlb_book3e

	/* Init global core bits */
	bl	init_core_book3e

	/* Init per-thread bits */
	bl	init_thread_book3e

	/* Return to common init code */
	tovirt(r28,r28)
	mtlr	r28
	blr


/*
 * Secondary core/processor entry
 *
 * This is entered for thread 0 of a secondary core, all other threads
 * are expected to be stopped. It's similar to start_initialization_book3e
 * except that it's generally entered from the holding loop in head_64.S
 * after CPUs have been gathered by Open Firmware.
 *
 * We assume we are in 32-bit mode running with whatever TLB entry was
 * set for us by the firmware or POR engine.
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
	li	r4,1
	b	generic_secondary_smp_init

_GLOBAL(book3e_secondary_core_init)
	mflr	r28

	/* Do we need to set up an initial TLB entry? */
	cmplwi	r4,0
	bne	2f

	/* Setup TLB for this core */
	bl	initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
	bl	relative_toc

	/* Init global core bits */
2:	bl	init_core_book3e

	/* Init per-thread bits */
3:	bl	init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
	 * Due to various previous assumptions, we know we entered this
	 * function at either the final PAGE_OFFSET mapping or using a
	 * 1:1 mapping at 0, so we don't bother doing a complicated check
	 * here, we just ensure the return address has the right top bits.
	 *
	 * Note that if we ever want to be smarter about where we can be
	 * started from, we have to be careful that by the time we reach
	 * the code below we may already be running at a different location
	 * than the one we were called from since initial_tlb_book3e can
	 * have moved us already.
	 */
	cmpdi	cr0,r28,0
	blt	1f
	lis	r3,PAGE_OFFSET@highest
	sldi	r3,r3,32
	or	r28,r28,r3
1:	mtlr	r28
	blr

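/*
 * The top-bits fixup above relies on the sign of the saved LR: a
 * PAGE_OFFSET address already has the high bits set (and so compares
 * negative), while a 1:1 boot address compares positive and gets
 * PAGE_OFFSET OR-ed into its top 32 bits.
 */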

_GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b

	.globl init_core_book3e
init_core_book3e:
	/* Establish the interrupt vector base */
	tovirt(r2,r2)
	LOAD_REG_ADDR(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync
	blr

init_thread_book3e:
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3

	/* Make sure interrupts are off */
	wrteei	0

	/* disable all timers and clear out status */
	li	r3,0
	mtspr	SPRN_TCR,r3
	mfspr	r3,SPRN_TSR
	mtspr	SPRN_TSR,r3

	blr

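/*
 * Note the TSR idiom above: the Timer Status Register is
 * write-one-to-clear, so reading it and writing the same value back
 * acknowledges every status bit that is currently set. EPCR[ICM] and
 * EPCR[GICM] select 64-bit computation mode for interrupts taken in
 * (guest) supervisor state.
 */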

_GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020) /* Critical Input */
	SET_IVOR(1, 0x000) /* Machine Check */
	SET_IVOR(2, 0x060) /* Data Storage */
	SET_IVOR(3, 0x080) /* Instruction Storage */
	SET_IVOR(4, 0x0a0) /* External Input */
	SET_IVOR(5, 0x0c0) /* Alignment */
	SET_IVOR(6, 0x0e0) /* Program */
	SET_IVOR(7, 0x100) /* FP Unavailable */
	SET_IVOR(8, 0x120) /* System Call */
	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160) /* Decrementer */
	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
	SET_IVOR(13, 0x1c0) /* Data TLB Error */
	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
	SET_IVOR(15, 0x040) /* Debug */

	sync

	blr

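/*
 * Each SET_IVOR programs one IVORn SPR with an offset that the core
 * combines with the IVPR base (set in init_core_book3e above) to form
 * the effective vector address, so the offsets here must match the
 * layout of interrupt_base_book3e.
 */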
_GLOBAL(setup_altivec_ivors)
	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
	SET_IVOR(33, 0x220) /* AltiVec Assist */
	blr

_GLOBAL(setup_perfmon_ivor)
	SET_IVOR(35, 0x260) /* Performance Monitor */
	blr

_GLOBAL(setup_doorbell_ivors)
	SET_IVOR(36, 0x280) /* Processor Doorbell */
	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
	blr

_GLOBAL(setup_ehv_ivors)
	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
	blr

_GLOBAL(setup_lrat_ivor)
	SET_IVOR(42, 0x340) /* LRAT Error */
	blr