/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
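	/*
	 * The STACK_FRAME_REGS_MARKER value stored just below the pt_regs
	 * area lets stack unwinders recognise this as an exception frame
	 * (it is cleared again on the exit-to-kernel path further down).
	 */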

	kuap_check_amr r10, r11

#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	/* see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	r11,r10
	beq+	33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the condition
	 * is correct.
	 */
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	lbz	r10,PACAIRQSOFTMASK(r13)
1:	tdnei	r10,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,IRQS_ENABLED
	std	r10,SOFTE(r1)

	ld	r11, PACA_THREAD_INFO(r13)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andis.	r10,r10,_TIF_32BIT@h
	beq	15f
	ld	r11,COMPAT_SYS_CALL_TABLE@toc(2)
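	/*
	 * A 32-bit task may leave junk in the upper halves of the argument
	 * registers; the compat ABI only guarantees the low 32 bits, so
	 * zero-extend them before dispatching.
	 */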
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,3
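	/*
	 * Each sys_call_table entry is an 8-byte pointer, so the syscall
	 * number is scaled by 8 to form the table offset used by ldx below.
	 */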

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .Lsyscall_enosys above has
	 * committed.
	 */

	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

	/* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */
.Lsyscall_exit:
	std	r3,RESULT(r1)

#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	ld	r3,RESULT(r1)
#endif

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r8,_MSR(r1)

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 *
	 * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
	 * could fault on the load of the TI_FLAGS below.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r11,MSR_RI
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

	andi.	r0,r8,MSR_FP
	beq 2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	kuap_check_amr r10, r11

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Clear MSR_RI. MSR_EE is already disabled and remains disabled. We
	 * could do this later, but testing shows that doing it here causes
	 * less slowdown than doing it closer to the rfid.
	 */
	li	r11,0
	mtmsrd	r11,1
#endif

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	std	r8, PACATMSCRATCH(r13)
#endif

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	/* exit to kernel */
	kuap_restore_amr r2

	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r10, PACA_THREAD_INFO(r13)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

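	/*
	 * thread_info->flags can be updated concurrently (e.g. another CPU
	 * setting TIF_NEED_RESCHED), so clear the per-syscall bits with an
	 * atomic ldarx/stdcx. read-modify-write loop.
	 */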
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,DEFAULT_PPR@highest	/* Set default PPR */
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
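/*
 * The low bit of the trap value in the frame is set while the non-volatile
 * GPRs have not been saved there; save_nvgprs clears it after saving them,
 * so a second call (and the restore path) knows the frame copies are valid.
 */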
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc_clone3)
	bl	save_nvgprs
	bl	sys_clone3
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420
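/*
 * 0x4c400420 encodes "bcctr 2,0,0", a special no-op form of bcctr that
 * count-cache-flush capable hardware/firmware recognises as a request to
 * invalidate the branch target count cache (Spectre v2 mitigation).
 */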

.macro nops number
	.rept \number
	nop
	.endr
.endm

.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself.
 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

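	/*
	 * The interrupted instruction was a store that updates r1 (e.g.
	 * stdu r1,-N(r1)); its memory update was deferred so it would not
	 * overwrite this exception frame. Move the frame down into a
	 * trampoline copy first, then complete the store below.
	 */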
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable interrupts
	 * again before we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,.Ldo_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths, such as load_up_fpu or altivec, return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	kuap_check_amr r5, r6

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	/* Restore PPR */
	ld	r2,_PPR(r1)
	mtspr	SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	li	r2,0
	std	r2,STACK_FRAME_OVERHEAD-16(r1)

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)

	kuap_restore_amr r4

	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to make
	 * sure that in this case we also clear PACA_IRQ_HARD_DIS, or that
	 * bit can get out of sync and bad things will happen.
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
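	/*
	 * __check_irq_replay returned the vector number of the pending
	 * interrupt in r3 (0x500 external, 0xf00 performance monitor,
	 * 0xe60 HMI, 0x900 decrementer, 0xa00/0x280 doorbell), which is
	 * what the comparisons below dispatch on.
	 */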
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xf00
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	performance_monitor_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
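	/*
	 * Two MSR values are built here: r0 is the current MSR with
	 * EE/SE/BE/RI cleared, used below to hard-disable before the rfid;
	 * r6 additionally drops SF, IR/DR, the FP bits and LE, giving the
	 * 32-bit, real-mode, big-endian MSR that RTAS is entered with.
	 */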

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */
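	/*
	 * bcl 20,31,$+4 / mflr reads the current instruction address into
	 * r3; the ld then fetches the 64-bit literal at label 1 below,
	 * which holds the virtual address of rtas_restore_regs, using only
	 * PC-relative addressing since relocation is still off here.
	 */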

	ld	r1,PACAR1(r4)           /* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr