// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
| 3 | |
| 4 | #include <linux/linkage.h> |
| 5 | #include <asm/unistd.h> |
| 6 | #include <asm/assembler.h> |
| 7 | #include <asm/nds32.h> |
| 8 | #include <asm/asm-offsets.h> |
| 9 | #include <asm/thread_info.h> |
| 10 | #include <asm/current.h> |
| 11 | |
/*
 * struct task_struct *__switch_to(prev, next)
 *
 * $r0 = previous task_struct,
 * $r1 = next task_struct,
 * previous and next are guaranteed not to be the same.
 *
 * Saves the callee-context of `prev` into prev->thread.cpu_context,
 * then restores `next`'s context from the same offset — including $sp
 * and $lp, so `ret` resumes wherever `next` last called __switch_to.
 */

ENTRY(__switch_to)

	la	$p0, __entry_task	! publish `next` as the task being
	sw	$r1, [$p0]		! entered (read by the entry path)
	move	$p1, $r0
	addi	$p1, $p1, #THREAD_CPU_CONTEXT	! $p1 = &prev->thread.cpu_context
	smw.bi	$r6, [$p1], $r14, #0xb ! push r6~r14, fp, lp, sp
	move	$r25, $r1		! $r25 caches current task_struct
	addi	$r1, $r1, #THREAD_CPU_CONTEXT	! $r1 = &next->thread.cpu_context
	lmw.bi	$r6, [$r1], $r14, #0xb ! pop r6~r14, fp, lp, sp
	ret				! returns on next's stack/lp
| 29 | |
| 30 | |
/* $r8 holds the syscall-table base while dispatching (clobbered freely) */
#define tbl $r8
| 32 | |
/*
 * $r7 will be written as syscall nr
 *
 * The entry path left the syscall number in the saved $r15 slot of
 * pt_regs; mirror it into pt_regs->syscallno as well so tracers can
 * read/modify it.
 */
.macro get_scno
	lwi	$r7, [$sp + R15_OFFSET]		! $r7 = saved $r15 = syscall nr
	swi	$r7, [$sp + SYSCALLNO_OFFSET]	! record it in pt_regs
.endm
| 40 | |
/*
 * Store $IPC + 4 into pt_regs->ipc: advance the saved return address
 * past the 4-byte trapping instruction so the task resumes after the
 * syscall on return to user space.
 */
.macro updateipc
	addi	$r17, $r13, #4 ! $r13 is $IPC
	swi	$r17, [$sp + IPC_OFFSET]
.endm
| 45 | |
/*
 * Syscall exception entry.
 * Fixes up the saved return address, extracts the syscall number into
 * $r7, then either dispatches straight through sys_call_table or
 * detours via the ptrace slow path when the task is being traced.
 */
ENTRY(eh_syscall)
	updateipc			! pt_regs->ipc = $IPC + 4

	get_scno			! $r7 = syscall nr (also saved in pt_regs)
	gie_enable			! re-enable interrupts (GIE) — macro from
					! asm/assembler.h

	lwi	$p0, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing

	andi	$p1, $p0, #_TIF_WORK_SYSCALL_ENTRY	! are we tracing syscalls?
	bnez	$p1, __sys_trace

	la	$lp, ret_fast_syscall ! return address for the handler
jmp_systbl:
	addi	$p1, $r7, #-__NR_syscalls ! syscall number of syscall instruction is guarded by assembler
	bgez	$p1, _SCNO_EXCEED	! nr >= __NR_syscalls -> bad_syscall;
					! otherwise fall through and call the
					! sys_* routine
	la	tbl, sys_call_table	! load syscall table pointer
	slli	$p1, $r7, #2		! index = nr * sizeof(void *)
	add	$p1, tbl, $p1
	lwi	$p1, [$p1]		! $p1 = sys_call_table[nr]
	jr	$p1			! no return here; handler returns via $lp

_SCNO_EXCEED:
	ori	$r0, $r7, #0		! arg0 = out-of-range syscall nr
	ori	$r1, $sp, #0		! arg1 = pt_regs
	b	bad_syscall
| 71 | |
/*
 * This is the really slow path. We're going to be doing
 * context switches, and waiting for our parent to respond.
 *
 * Entered with $r7 = syscall nr. The tracer may rewrite the syscall
 * number (or request a skip by returning -1), so re-read everything
 * from pt_regs before dispatching.
 */
__sys_trace:
	move	$r0, $sp		! arg0 = pt_regs
	bal	syscall_trace_enter
	move	$r7, $r0		! tracer may have changed the syscall nr
	la	$lp, __sys_trace_return ! return address for the handler

	addi	$p1, $r7, #1		! nr == -1 -> skip the syscall
	beqz	$p1, ret_slow_syscall	! fatal signal is pending

	addi	$p1, $sp, #R0_OFFSET	! pointer to regs
	lmw.bi	$r0, [$p1], $r5		! have to reload $r0 - $r5 (tracer may
					! have modified the saved arguments)
	b	jmp_systbl
| 88 | |
/*
 * Post-syscall ptrace hook: record the handler's return value in
 * pt_regs, notify the tracer, then take the slow return path.
 */
__sys_trace_return:
	swi	$r0, [$sp+#R0_OFFSET]	! T: save returned $r0 into pt_regs
	move	$r0, $sp		! set pt_regs for syscall_trace_leave
	bal	syscall_trace_leave
	b	ret_slow_syscall
| 94 | |
/*
 * sys_rt_sigreturn takes a struct pt_regs * as its only argument.
 * The saved register frame sits at the kernel stack pointer, so hand
 * $sp over in $r0 and tail-call the C implementation.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	move	$r0, $sp		! arg0 = pt_regs
	b	sys_rt_sigreturn	! tail call; no return here
ENDPROC(sys_rt_sigreturn_wrapper)