Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fa2c08e..8ca4798 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
@@ -37,11 +29,11 @@
* the kernel thread pointer. If we came from the kernel, sscratch
* will contain 0, and we should continue on the current TP.
*/
- csrrw tp, sscratch, tp
+ csrrw tp, CSR_SSCRATCH, tp
bnez tp, _save_context
_restore_kernel_tpsp:
- csrr tp, sscratch
+ csrr tp, CSR_SSCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
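# Editor's sketch (not part of the patch): the sscratch convention the
# comment above relies on. While the hart runs in user mode, sscratch
# holds the kernel thread pointer; while it runs in kernel mode,
# sscratch holds 0. The entry swap then distinguishes the two cases:
#
#   csrrw tp, CSR_SSCRATCH, tp   # tp <- sscratch, sscratch <- old tp
#   bnez  tp, _save_context      # nonzero tp => trap came from U-mode
#
# handle_exception later zeroes sscratch (kernel mode), and the return
# path writes tp back before sret to re-establish the user-mode state.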
@@ -87,11 +79,11 @@
li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp)
- csrrc s1, sstatus, t0
- csrr s2, sepc
- csrr s3, sbadaddr
- csrr s4, scause
- csrr s5, sscratch
+ csrrc s1, CSR_SSTATUS, t0
+ csrr s2, CSR_SEPC
+ csrr s3, CSR_STVAL
+ csrr s4, CSR_SCAUSE
+ csrr s5, CSR_SSCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp)
REG_S s2, PT_SEPC(sp)
@@ -106,9 +98,28 @@
*/
.macro RESTORE_ALL
REG_L a0, PT_SSTATUS(sp)
- REG_L a2, PT_SEPC(sp)
- csrw sstatus, a0
- csrw sepc, a2
+ /*
+ * The current load reservation is effectively part of the processor's
+ * state, in the sense that load reservations cannot be shared between
+ * different hart contexts. We can't actually save and restore a load
+ * reservation, so instead here we clear any existing reservation --
+ * it's always legal for implementations to clear load reservations at
+ * any point (as long as the forward progress guarantee is kept, but
+ * we'll ignore that here).
+ *
+ * Dangling load reservations can be the result of taking a trap in the
+ * middle of an LR/SC sequence, but can also be the result of a taken
+ * forward branch around an SC -- which is how we implement CAS. As a
+ * result we need to clear reservations between the last CAS and the
+ * jump back to the new context. While it is unlikely the store
+ * completes, implementations are allowed to expand reservations to be
+ * arbitrarily large.
+ */
+ REG_L a2, PT_SEPC(sp)
+ REG_SC x0, a2, PT_SEPC(sp)
+
+ csrw CSR_SSTATUS, a0
+ csrw CSR_SEPC, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
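# Editor's sketch (not part of the patch): the LR/SC compare-and-swap
# pattern the comment above refers to. Register roles are hypothetical:
# a0 = address, a1 = expected value, a2 = new value.
cas_sketch:
	lr.w	t0, (a0)	# load-reserved: acquires a reservation
	bne	t0, a1, 1f	# mismatch: taken forward branch around the
				# SC, leaving the reservation dangling
	sc.w	t1, a2, (a0)	# store-conditional: succeeds only if the
				# reservation is still held; clears it
	bnez	t1, cas_sketch	# nonzero result = SC failed, retry
1:
# When the bne is taken, no SC ever runs, so the dummy REG_SC in
# RESTORE_ALL above is what clears the leftover reservation.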
@@ -144,6 +155,10 @@
REG_L x2, PT_SP(sp)
.endm
+#if !IS_ENABLED(CONFIG_PREEMPT)
+.set resume_kernel, restore_all
+#endif
+
ENTRY(handle_exception)
SAVE_ALL
@@ -151,7 +166,7 @@
* Set sscratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel
*/
- csrw sscratch, x0
+ csrw CSR_SSCRATCH, x0
/* Load the global pointer */
.option push
@@ -168,12 +183,15 @@
/* Handle interrupts */
move a0, sp /* pt_regs */
- move a1, s4 /* scause */
tail do_IRQ
1:
- /* Exceptions run with interrupts enabled */
- csrs sstatus, SR_SIE
+ /*
+  * Exceptions run with interrupts enabled or disabled depending on
+  * the state of sstatus.SR_SPIE
+  */
+ andi t0, s1, SR_SPIE
+ beqz t0, 1f
+ csrs CSR_SSTATUS, SR_SIE
+1:
/* Handle syscalls */
li t0, EXC_SYSCALL
beq s4, t0, handle_syscall
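# Editor's note (illustrative): s1 holds the sstatus value captured by
# SAVE_ALL. SR_SPIE records whether interrupts were enabled when the
# trap was taken, so SR_SIE is set again only for exceptions taken from
# an interrupts-on context; traps from IRQs-off regions stay masked.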
@@ -202,7 +220,7 @@
REG_S s2, PT_SEPC(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_enter
check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */
@@ -222,14 +240,14 @@
REG_S a0, PT_A0(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit
ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
- csrc sstatus, SR_SIE
+ csrc CSR_SSTATUS, SR_SIE
andi s0, s0, SR_SPP
- bnez s0, restore_all
+ bnez s0, resume_kernel
resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
@@ -245,12 +263,23 @@
* Save TP into sscratch, so we can find the kernel data structures
* again.
*/
- csrw sscratch, tp
+ csrw CSR_SSCRATCH, tp
restore_all:
RESTORE_ALL
sret
+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+ REG_L s0, TASK_TI_FLAGS(tp)
+ andi s0, s0, _TIF_NEED_RESCHED
+ beqz s0, restore_all
+ call preempt_schedule_irq
+ j restore_all
+#endif
+
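# Editor's sketch (not part of the patch): resume_kernel is the assembly
# form of the generic preemption rule, roughly
#
#	if (!preempt_count() && (thread_flags & _TIF_NEED_RESCHED))
#		preempt_schedule_irq();
#
# preempt_schedule_irq() must be entered with interrupts disabled, which
# holds here because ret_from_exception clears SR_SIE before branching.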
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
@@ -258,7 +287,7 @@
bnez s1, work_resched
work_notifysig:
/* Handle pending signals and notify-resume requests */
- csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
+ csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
tail do_notify_resume
@@ -357,93 +386,6 @@
ret
ENDPROC(__switch_to)
-ENTRY(__fstate_save)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- csrs sstatus, t1
- frcsr t0
- fsd f0, TASK_THREAD_F0_F0(a0)
- fsd f1, TASK_THREAD_F1_F0(a0)
- fsd f2, TASK_THREAD_F2_F0(a0)
- fsd f3, TASK_THREAD_F3_F0(a0)
- fsd f4, TASK_THREAD_F4_F0(a0)
- fsd f5, TASK_THREAD_F5_F0(a0)
- fsd f6, TASK_THREAD_F6_F0(a0)
- fsd f7, TASK_THREAD_F7_F0(a0)
- fsd f8, TASK_THREAD_F8_F0(a0)
- fsd f9, TASK_THREAD_F9_F0(a0)
- fsd f10, TASK_THREAD_F10_F0(a0)
- fsd f11, TASK_THREAD_F11_F0(a0)
- fsd f12, TASK_THREAD_F12_F0(a0)
- fsd f13, TASK_THREAD_F13_F0(a0)
- fsd f14, TASK_THREAD_F14_F0(a0)
- fsd f15, TASK_THREAD_F15_F0(a0)
- fsd f16, TASK_THREAD_F16_F0(a0)
- fsd f17, TASK_THREAD_F17_F0(a0)
- fsd f18, TASK_THREAD_F18_F0(a0)
- fsd f19, TASK_THREAD_F19_F0(a0)
- fsd f20, TASK_THREAD_F20_F0(a0)
- fsd f21, TASK_THREAD_F21_F0(a0)
- fsd f22, TASK_THREAD_F22_F0(a0)
- fsd f23, TASK_THREAD_F23_F0(a0)
- fsd f24, TASK_THREAD_F24_F0(a0)
- fsd f25, TASK_THREAD_F25_F0(a0)
- fsd f26, TASK_THREAD_F26_F0(a0)
- fsd f27, TASK_THREAD_F27_F0(a0)
- fsd f28, TASK_THREAD_F28_F0(a0)
- fsd f29, TASK_THREAD_F29_F0(a0)
- fsd f30, TASK_THREAD_F30_F0(a0)
- fsd f31, TASK_THREAD_F31_F0(a0)
- sw t0, TASK_THREAD_FCSR_F0(a0)
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_save)
-
-ENTRY(__fstate_restore)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- lw t0, TASK_THREAD_FCSR_F0(a0)
- csrs sstatus, t1
- fld f0, TASK_THREAD_F0_F0(a0)
- fld f1, TASK_THREAD_F1_F0(a0)
- fld f2, TASK_THREAD_F2_F0(a0)
- fld f3, TASK_THREAD_F3_F0(a0)
- fld f4, TASK_THREAD_F4_F0(a0)
- fld f5, TASK_THREAD_F5_F0(a0)
- fld f6, TASK_THREAD_F6_F0(a0)
- fld f7, TASK_THREAD_F7_F0(a0)
- fld f8, TASK_THREAD_F8_F0(a0)
- fld f9, TASK_THREAD_F9_F0(a0)
- fld f10, TASK_THREAD_F10_F0(a0)
- fld f11, TASK_THREAD_F11_F0(a0)
- fld f12, TASK_THREAD_F12_F0(a0)
- fld f13, TASK_THREAD_F13_F0(a0)
- fld f14, TASK_THREAD_F14_F0(a0)
- fld f15, TASK_THREAD_F15_F0(a0)
- fld f16, TASK_THREAD_F16_F0(a0)
- fld f17, TASK_THREAD_F17_F0(a0)
- fld f18, TASK_THREAD_F18_F0(a0)
- fld f19, TASK_THREAD_F19_F0(a0)
- fld f20, TASK_THREAD_F20_F0(a0)
- fld f21, TASK_THREAD_F21_F0(a0)
- fld f22, TASK_THREAD_F22_F0(a0)
- fld f23, TASK_THREAD_F23_F0(a0)
- fld f24, TASK_THREAD_F24_F0(a0)
- fld f25, TASK_THREAD_F25_F0(a0)
- fld f26, TASK_THREAD_F26_F0(a0)
- fld f27, TASK_THREAD_F27_F0(a0)
- fld f28, TASK_THREAD_F28_F0(a0)
- fld f29, TASK_THREAD_F29_F0(a0)
- fld f30, TASK_THREAD_F30_F0(a0)
- fld f31, TASK_THREAD_F31_F0(a0)
- fscsr t0
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_restore)
-
-
.section ".rodata"
/* Exception vector table */
ENTRY(excp_vect_table)