Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2206912..3fd3ef3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -11,11 +12,6 @@
*
* This file contains the system call entry code, context switch
* code, and exception/interrupt return code for PowerPC.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
@@ -46,6 +42,7 @@
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
+#include <asm/kup.h>
/*
* System calls.
@@ -54,6 +51,9 @@
SYS_CALL_TABLE:
.tc sys_call_table[TC],sys_call_table
+COMPAT_SYS_CALL_TABLE:
+ .tc compat_sys_call_table[TC],compat_sys_call_table
+
/* This value is used to mark exception frames on the stack. */
exception_marker:
.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
@@ -69,19 +69,20 @@
bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
- andi. r10,r12,MSR_PR
mr r10,r1
- addi r1,r1,-INT_FRAME_SIZE
- beq- 1f
ld r1,PACAKSAVE(r13)
-1: std r10,0(r1)
+ std r10,0(r1)
std r11,_NIP(r1)
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
- beq 2f /* if from kernel mode */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
-2: std r2,GPR2(r1)
+ std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
std r4,GPR4(r1)
@@ -112,16 +113,18 @@
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
+
+ kuap_check_amr r10, r11
+
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
- beq 33f
- /* if from user, see if there are any DTL entries to process */
+ /* see if there are any DTL entries to process */
ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
ld r11,PACA_DTL_RIDX(r13) /* get log read index */
addi r10,r10,LPPACA_DTLIDX
LDX_BE r10,0,r10 /* get log write index */
- cmpd cr1,r11,r10
- beq+ cr1,33f
+ cmpd r11,r10
+ beq+ 33f
bl accumulate_stolen_time
REST_GPR(0,r1)
REST_4GPRS(3,r1)
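
The kuap_check_amr hook added above comes from the Kernel Userspace Access Prevention (KUAP) support: the kernel keeps user memory locked via the AMR, opens an access window only around explicit user accessors, and debug builds assert on kernel entry that the window is still closed. Below is a minimal user-space model of that idea, not kernel code; every name in it (amr_locked, allow_user_access, copy_from_user_model) is illustrative.

    /* Toy model of KUAP: access to "user" memory is locked by default,
     * opened only inside an accessor, and entry paths assert it is locked. */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static int amr_locked = 1;                 /* stand-in for the AMR state */

    static void allow_user_access(void)   { amr_locked = 0; }
    static void prevent_user_access(void) { amr_locked = 1; }
    static void kuap_check(void)          { assert(amr_locked); }

    static void copy_from_user_model(char *dst, const char *src, size_t n)
    {
            allow_user_access();               /* open the window   */
            memcpy(dst, src, n);               /* do the access     */
            prevent_user_access();             /* close it again    */
    }

    int main(void)
    {
            char buf[8];

            kuap_check();                      /* "syscall entry": must be locked */
            copy_from_user_model(buf, "hi", 3);
            kuap_check();                      /* still locked after the copy */
            puts(buf);
            return 0;
    }
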
@@ -158,7 +161,7 @@
li r10,IRQS_ENABLED
std r10,SOFTE(r1)
- CURRENT_THREAD_INFO(r11, r1)
+ ld r11, PACA_THREAD_INFO(r13)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne .Lsyscall_dotrace /* does not return */
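
CURRENT_THREAD_INFO(rX, r1) used to recover thread_info by masking the stack pointer down to the stack base; with THREAD_INFO_IN_TASK it lives at the start of task_struct, and this file now loads a pointer the PACA already keeps (ld rX, PACA_THREAD_INFO(r13)), so the lookup no longer depends on r1. A rough C model of the two lookups follows; the struct layout and the paca_like type are illustrative, and THREAD_SHIFT is just an example value.

    #include <stdio.h>

    #define THREAD_SHIFT 14                   /* example: 16 KB kernel stacks */
    #define THREAD_SIZE  (1UL << THREAD_SHIFT)

    struct thread_info { unsigned long flags; };

    struct task_struct {
            struct thread_info thread_info;   /* must remain the first member */
            /* ... */
    };

    struct paca_like { struct task_struct *current_task; };

    /* old scheme: thread_info sat at the base of the kernel stack */
    static struct thread_info *ti_from_stack(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }

    /* new scheme: follow the per-CPU pointer instead of deriving from r1 */
    static struct thread_info *ti_from_paca(struct paca_like *paca)
    {
            return &paca->current_task->thread_info;
    }

    int main(void)
    {
            static struct task_struct task;
            struct paca_like paca = { .current_task = &task };

            printf("flags via paca lookup: %lu\n", ti_from_paca(&paca)->flags);
            (void)ti_from_stack;              /* shown only for the contrast */
            return 0;
    }
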
@@ -171,9 +174,9 @@
* based on caller's run-mode / personality.
*/
ld r11,SYS_CALL_TABLE@toc(2)
- andi. r10,r10,_TIF_32BIT
+ andis. r10,r10,_TIF_32BIT@h
beq 15f
- addi r11,r11,8 /* use 32-bit syscall entries */
+ ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
clrldi r3,r3,32
clrldi r4,r4,32
clrldi r5,r5,32
@@ -181,7 +184,7 @@
clrldi r7,r7,32
clrldi r8,r8,32
15:
- slwi r0,r0,4
+ slwi r0,r0,3
barrier_nospec_asm
/*
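
Older kernels laid out sys_call_table with the native and compat entry of each syscall side by side, which is why the removed code shifted the syscall number by 4 (16-byte slots) and added 8 to reach the 32-bit entry; with the tables split, the dispatcher above selects COMPAT_SYS_CALL_TABLE for 32-bit tasks and indexes either table with a plain 8-byte stride (slwi r0,r0,3). A small C sketch of the new shape, with placeholder handlers (the real tables are generated from syscall.tbl):

    #include <stdio.h>

    typedef long (*syscall_fn)(long, long, long, long, long, long);

    static long sys_dummy(long a, long b, long c, long d, long e, long f)
    {
            (void)b; (void)c; (void)d; (void)e; (void)f;
            return a;
    }

    /* two flat tables, one 8-byte pointer per syscall number */
    static const syscall_fn sys_call_table[]        = { sys_dummy };
    static const syscall_fn compat_sys_call_table[] = { sys_dummy };

    static long dispatch(int is_32bit_task, unsigned long nr, long a0)
    {
            /* mirrors the asm: choose the table, then index by nr */
            const syscall_fn *table = is_32bit_task ? compat_sys_call_table
                                                    : sys_call_table;
            return table[nr](a0, 0, 0, 0, 0, 0);
    }

    int main(void)
    {
            printf("%ld\n", dispatch(0, 0, 42));   /* native path */
            printf("%ld\n", dispatch(1, 0, 43));   /* compat path */
            return 0;
    }
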
@@ -195,6 +198,7 @@
mtctr r12
bctrl /* Call handler */
+ /* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */
.Lsyscall_exit:
std r3,RESULT(r1)
@@ -205,14 +209,9 @@
ld r3,RESULT(r1)
#endif
- CURRENT_THREAD_INFO(r12, r1)
+ ld r12, PACA_THREAD_INFO(r13)
ld r8,_MSR(r1)
-#ifdef CONFIG_PPC_BOOK3S
- /* No MSR:RI on BookE */
- andi. r10,r8,MSR_RI
- beq- .Lunrecov_restore
-#endif
/*
* This is a few instructions into the actual syscall exit path (which actually
@@ -228,18 +227,14 @@
/*
* Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
+ *
+ * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
+ * could fault on the load of the TI_FLAGS below.
*/
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- /*
- * For performance reasons we clear RI the same time that we
- * clear EE. We only need to clear RI just before we restore r13
- * below, but batching it with EE saves us one expensive mtmsrd call.
- * We have to be careful to restore RI if we branch anywhere from
- * here (eg syscall_exit_work).
- */
- li r11,0
+ li r11,MSR_RI
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -255,15 +250,7 @@
bne 3f
#endif
2: addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
- li r10,MSR_RI
- mtmsrd r10,1 /* Restore RI */
-#endif
bl restore_math
-#ifdef CONFIG_PPC_BOOK3S
- li r11,0
- mtmsrd r11,1
-#endif
ld r8,_MSR(r1)
ld r3,RESULT(r1)
li r11,-MAX_ERRNO
@@ -279,6 +266,18 @@
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
+ kuap_check_amr r10, r11
+
+#ifdef CONFIG_PPC_BOOK3S
+ /*
+ * Clear MSR_RI; MSR_EE is already disabled and remains so. We could do
+ * this later, but testing shows that doing it here causes less slowdown
+ * than doing it closer to the rfid.
+ */
+ li r11,0
+ mtmsrd r11,1
+#endif
+
beq- 1f
ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
@@ -286,6 +285,14 @@
HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ std r8, PACATMSCRATCH(r13)
+#endif
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
ld r2,GPR2(r1)
ld r1,GPR1(r1)
@@ -296,8 +303,10 @@
RFI_TO_USER
b . /* prevent speculative execution */
- /* exit to kernel */
-1: ld r2,GPR2(r1)
+1: /* exit to kernel */
+ kuap_restore_amr r2
+
+ ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtlr r4
mtcr r5
@@ -336,7 +345,7 @@
/* Repopulate r9 and r10 for the syscall path */
addi r9,r1,STACK_FRAME_OVERHEAD
- CURRENT_THREAD_INFO(r10, r1)
+ ld r10, PACA_THREAD_INFO(r13)
ld r10,TI_FLAGS(r10)
cmpldi r0,NR_syscalls
@@ -351,10 +360,6 @@
b .Lsyscall_exit
.Lsyscall_exit_work:
-#ifdef CONFIG_PPC_BOOK3S
- li r10,MSR_RI
- mtmsrd r10,1 /* Restore RI */
-#endif
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -386,10 +391,9 @@
4: /* Anything else left to do? */
BEGIN_FTR_SECTION
- lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
- ld r10,PACACURRENT(r13)
+ lis r3,DEFAULT_PPR@highest /* Set default PPR */
sldi r3,r3,32 /* bits 11-13 are used for ppr */
- std r3,TASKTHREADPPR(r10)
+ std r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -474,6 +478,11 @@
bl sys_clone
b .Lsyscall_exit
+_GLOBAL(ppc_clone3)
+ bl save_nvgprs
+ bl sys_clone3
+ b .Lsyscall_exit
+
_GLOBAL(ppc32_swapcontext)
bl save_nvgprs
bl compat_sys_swapcontext
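
ppc_clone3 above wires the clone3 syscall (new in upstream 5.3) into the powerpc entry code, saving the non-volatile GPRs before calling sys_clone3, the same pattern as the other fork/clone variants. There is no glibc wrapper, so user space goes through syscall(2); a minimal fork-like use, assuming headers recent enough to provide SYS_clone3 and struct clone_args (otherwise the 64-byte v0 layout has to be declared by hand):

    #define _GNU_SOURCE
    #include <linux/sched.h>          /* struct clone_args (kernel >= 5.3 headers) */
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <signal.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            struct clone_args args;

            memset(&args, 0, sizeof(args));
            args.exit_signal = SIGCHLD;            /* behave like fork() */

            long pid = syscall(SYS_clone3, &args, sizeof(args));
            if (pid < 0) {
                    perror("clone3");
                    return 1;
            }
            if (pid == 0) {                        /* child */
                    printf("child %d running\n", getpid());
                    _exit(0);
            }
            waitpid((pid_t)pid, NULL, 0);
            printf("parent reaped child %ld\n", pid);
            return 0;
    }
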
@@ -528,6 +537,7 @@
/* Save LR into r9 */
mflr r9
+ // Flush the link stack
.rept 64
bl .+4
.endr
@@ -537,6 +547,11 @@
.balign 32
/* Restore LR */
1: mtlr r9
+
+ // If we're just flushing the link stack, return here
+3: nop
+ patch_site 3b patch__flush_link_stack_return
+
li r9,0x7fff
mtctr r9
@@ -589,6 +604,8 @@
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+ kuap_check_amr r9, r10
+
FLUSH_COUNT_CACHE
/*
@@ -624,6 +641,10 @@
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
+#if defined(CONFIG_STACKPROTECTOR)
+ ld r6, TASK_CANARY(r6)
+ std r6, PACA_CANARY(r13)
+#endif
ld r8,KSP(r4) /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
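
The CONFIG_STACKPROTECTOR hunk copies the incoming task's canary into the PACA on every context switch: the 64-bit kernel points GCC's stack-protector guard at a PACA field reached through r13, and each task keeps its own canary, so the value has to follow the task. What that canary check buys can be seen with any -fstack-protector build; a deliberately buggy user-space example:

    /* Build: gcc -O0 -fstack-protector-strong smash.c && ./a.out
     * Expected: the run aborts with "*** stack smashing detected ***". */
    #include <string.h>

    static void smash(const char *src)
    {
            char buf[16];

            strcpy(buf, src);         /* writes past buf and clobbers the canary */
    }

    int main(void)
    {
            smash("this string is comfortably longer than sixteen bytes");
            return 0;
    }
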
@@ -672,13 +693,15 @@
isync
slbie r6
+BEGIN_FTR_SECTION
slbie r6 /* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
slbmte r7,r0
isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */
- CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
+ clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
because we don't need to leave the 288-byte ABI gap at the
top of the kernel stack. */
@@ -729,7 +752,7 @@
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
- CURRENT_THREAD_INFO(r9, r1)
+ ld r9, PACA_THREAD_INFO(r13)
ld r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
ld r10,PACACURRENT(r13)
@@ -840,13 +863,7 @@
* sure we are soft-disabled first and reconcile irq state.
*/
RECONCILE_IRQ_STATE(r3,r4)
-1: bl preempt_schedule_irq
-
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
+ bl preempt_schedule_irq
/*
* arch_local_irq_restore() from preempt_schedule_irq above may
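
The hand-rolled loop that re-read TI_FLAGS and retried on _TIF_NEED_RESCHED is dropped because the generic helper already repeats the schedule itself while need_resched() stays set, so the extra loop in the caller was redundant. Roughly, paraphrasing the shape of preempt_schedule_irq() in kernel/sched/core.c (a sketch, not a standalone program):

    void preempt_schedule_irq(void)
    {
            do {
                    preempt_disable();
                    local_irq_enable();
                    __schedule(true);                  /* preempt = true */
                    local_irq_disable();
                    sched_preempt_enable_no_resched();
            } while (need_resched());
    }
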
@@ -931,17 +948,13 @@
ld r4,_XER(r1)
mtspr SPRN_XER,r4
+ kuap_check_amr r5, r6
+
REST_8GPRS(5, r1)
andi. r0,r3,MSR_RI
beq- .Lunrecov_restore
- /* Load PPR from thread struct before we clear MSR:RI */
-BEGIN_FTR_SECTION
- ld r2,PACACURRENT(r13)
- ld r2,TASKTHREADPPR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
/*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
@@ -962,11 +975,17 @@
andi. r0,r3,MSR_PR
beq 1f
BEGIN_FTR_SECTION
- mtspr SPRN_PPR,r2 /* Restore PPR */
+ /* Restore PPR */
+ ld r2,_PPR(r1)
+ mtspr SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
mtspr SPRN_SRR1,r3
ld r2,_CCR(r1)
@@ -989,9 +1008,19 @@
ld r2,_NIP(r1)
mtspr SPRN_SRR0,r2
+ /*
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ li r2,0
+ std r2,STACK_FRAME_OVERHEAD-16(r1)
+
ld r0,GPR0(r1)
ld r2,GPR2(r1)
ld r3,GPR3(r1)
+
+ kuap_restore_amr r4
+
ld r4,GPR4(r1)
ld r1,GPR1(r1)
RFI_TO_KERNEL
@@ -1118,7 +1147,7 @@
_GLOBAL(enter_rtas)
mflr r0
std r0,16(r1)
- stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
/* Because RTAS is running in 32b mode, it clobbers the high order half
* of all registers that it saves. We therefore save those registers
@@ -1250,7 +1279,7 @@
ld r8,_DSISR(r1)
mtdsisr r8
- addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
+ addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
ld r0,16(r1) /* get return address */
mtlr r0
@@ -1261,7 +1290,7 @@
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
- stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
/* Because PROM is running in 32b mode, it clobbers the high order half
* of all registers that it saves. We therefore save those registers
@@ -1318,8 +1347,8 @@
REST_10GPRS(22, r1)
ld r4,_CCR(r1)
mtcr r4
-
- addi r1,r1,PROM_FRAME_SIZE
+
+ addi r1,r1,SWITCH_FRAME_SIZE
ld r0,16(r1)
mtlr r0
blr