/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
| 14 | |
| 15 | #include <linux/init.h> |
| 16 | #include <linux/linkage.h> |
| 17 | |
| 18 | #include <asm/asm.h> |
| 19 | #include <asm/csr.h> |
| 20 | #include <asm/unistd.h> |
| 21 | #include <asm/thread_info.h> |
| 22 | #include <asm/asm-offsets.h> |
| 23 | |
| 24 | .text |
| 25 | .altmacro |
| 26 | |
/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 *
 * Convention on exit from this macro:
 *   sp = kernel stack, with a struct pt_regs frame at its base
 *   tp = current task_struct
 *   s0 = saved (user or kernel) stack pointer
 *   s1 = sstatus at trap time (with SUM and FS cleared in the live CSR)
 *   s2 = sepc, s3 = sbadaddr (stval), s4 = scause
 *   s5 = trapped thread pointer (read back from sscratch)
 * Callers rely on s2-s4 still holding these values after SAVE_ALL.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, sscratch
	 * will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, sscratch, tp		/* atomically swap tp <-> sscratch */
	bnez tp, _save_context		/* non-zero => trapped from userspace */

_restore_kernel_tpsp:
	/* Kernel trap: recover tp (csrrw above moved it into sscratch). */
	csrr tp, sscratch
	/*
	 * Record the interrupted kernel sp so the common path below can
	 * reload it; this makes TASK_TI_KERNEL_SP valid in both cases.
	 */
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	/* Save the trapped sp (user or kernel) and switch to the kernel stack. */
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	/* Carve a pt_regs frame on the kernel stack. */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	/*
	 * Save all GPRs except x2 (sp) and x4 (tp), which were already
	 * handled above and are stored via s0/s5 at the end of the macro.
	 */
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)	/* trapped sp, saved earlier */
	csrrc s1, sstatus, t0		/* read sstatus, clearing SUM|FS atomically */
	csrr s2, sepc			/* faulting/trapping pc */
	csrr s3, sbadaddr		/* fault address (legacy name for stval) */
	csrr s4, scause			/* trap cause */
	csrr s5, sscratch		/* trapped tp (swapped in by csrrw above) */
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_SSTATUS(sp)
	REG_S s2, PT_SEPC(sp)
	REG_S s3, PT_SBADADDR(sp)
	REG_S s4, PT_SCAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm
| 102 | |
/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 *
 * sstatus and sepc are written back first (while a0/a2 are still free as
 * scratch), then every GPR is reloaded; sp itself (x2) must be restored
 * last because it is the base register for all the other loads.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_SSTATUS(sp)
	REG_L a2, PT_SEPC(sp)
	csrw sstatus, a0		/* restore previous privilege/interrupt state */
	csrw sepc, a2			/* address sret will return to */

	/* Restore all GPRs except x2 (sp), which must come last. */
	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	/* Finally switch back to the trapped stack pointer. */
	REG_L x2,  PT_SP(sp)
	.endm
| 146 | |
/*
 * Common entry point for all S-mode traps (installed in stvec).
 * Saves the trap frame, then dispatches to the interrupt handler, the
 * syscall path, or the per-exception-code vector table.  After SAVE_ALL:
 * s2 = sepc, s3 = sbadaddr, s4 = scause (relied on throughout below).
 */
ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set sscratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw sscratch, x0

	/*
	 * Load the global pointer.  Relaxation must be disabled so the
	 * assembler does not turn this into a gp-relative reference while
	 * gp itself is still the (untrusted) trapped value.
	 */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/* Handlers reached via `tail` below return through here. */
	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions: scause is negative (as a signed
	 * value) for interrupts, non-negative for exceptions.
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp			/* pt_regs */
	move a1, s4			/* scause */
	tail do_IRQ
1:
	/* Exceptions run with interrupts enabled */
	csrs sstatus, SR_SIE

	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR	/* scale cause to a pointer-sized index */
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp			/* pt_regs */
	add t0, t1, t0			/* t0 = &excp_vect_table[scause] */
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0				/* returns to ret_from_exception via ra */
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4		/* ecall is a 4-byte instruction */
	REG_S s2, PT_SEPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_TRACE
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall		/* default for out-of-range numbers */
	/* Syscall number held in a7 */
	bgeu a7, t0, 1f
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)			/* s0 = sys_call_table[a7] */
1:
	jalr s0				/* sets ra, so we fall into ret_from_syscall */

ret_from_syscall:
	/* Set user a0 to kernel a0 (the syscall's return value) */
	REG_S a0, PT_A0(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_TRACE
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
	csrc sstatus, SR_SIE		/* disable interrupts for the exit path */
	andi s0, s0, SR_SPP		/* SPP set => we trapped from S-mode */
	bnez s0, restore_all		/* returning to kernel: skip user work */

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp)	/* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into sscratch, so we can find the kernel data structures
	 * again.  (SAVE_ALL's csrrw relies on this being non-zero for
	 * traps arriving from userspace.)
	 */
	csrw sscratch, tp

restore_all:
	RESTORE_ALL
	sret

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception	/* re-run the exit checks afterwards */
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs sstatus, SR_SIE		/* Enable interrupts for do_notify_resume() */
	move a0, sp			/* pt_regs */
	move a1, s0			/* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule			/* returns to ret_from_exception via ra */

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	/* The tracer may have modified the registers; reload the args. */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)
| 287 | |
/*
 * First code run by a newly forked user task, entered from __switch_to
 * (ra was pointed here by copy_thread — not visible in this file; TODO
 * confirm against the arch's process setup code).  Finishes the context
 * switch, then drops into the normal exception-return path.
 */
ENTRY(ret_from_fork)
	la ra, ret_from_exception	/* schedule_tail returns into the exit path */
	tail schedule_tail
ENDPROC(ret_from_fork)
| 292 | |
/*
 * First code run by a newly created kernel thread.  s0/s1 carry the
 * thread function and its argument (presumably set up by copy_thread —
 * TODO confirm against the arch's process setup code).
 */
ENTRY(ret_from_kernel_thread)
	call schedule_tail		/* finish the context switch */
	/* Call fn(arg) */
	la ra, ret_from_exception	/* if fn returns, exit via the trap path */
	move a0, s1			/* a0 = arg */
	jr s0				/* jump to fn */
ENDPROC(ret_from_kernel_thread)
| 300 | |
| 301 | |
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 *
 * Offsets: TASK_THREAD_RA is the offset of thread.ra within task_struct;
 * the TASK_THREAD_*_RA constants are offsets relative to thread.ra, so
 * that after adding TASK_THREAD_RA to the task pointer every save slot
 * is reachable with a small immediate.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4,  TASK_THREAD_RA
	add a3, a0, a4			/* a3 = &prev->thread.ra */
	add a4, a1, a4			/* a4 = &next->thread.ra */
	/* Only ra, sp and the s-registers survive a C function call. */
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* Point tp at the next task; get_current() reads tp directly. */
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret				/* returns into `next`'s saved ra */
ENDPROC(__switch_to)
| 359 | |
/*
 * Save the FPU state (f0-f31 and fcsr) of a task.
 *
 * a0: task_struct to save into.  The FS bits in sstatus are set for the
 * duration of the save — SAVE_ALL cleared them on kernel entry, so FP
 * instructions would otherwise trap — and cleared again before returning.
 */
ENTRY(__fstate_save)
	li  a2,  TASK_THREAD_F0
	add a0, a0, a2			/* a0 = &task->thread.fstate.f[0] */
	li t1, SR_FS
	csrs sstatus, t1		/* temporarily enable FP instructions */
	frcsr t0			/* read fcsr */
	fsd f0,  TASK_THREAD_F0_F0(a0)
	fsd f1,  TASK_THREAD_F1_F0(a0)
	fsd f2,  TASK_THREAD_F2_F0(a0)
	fsd f3,  TASK_THREAD_F3_F0(a0)
	fsd f4,  TASK_THREAD_F4_F0(a0)
	fsd f5,  TASK_THREAD_F5_F0(a0)
	fsd f6,  TASK_THREAD_F6_F0(a0)
	fsd f7,  TASK_THREAD_F7_F0(a0)
	fsd f8,  TASK_THREAD_F8_F0(a0)
	fsd f9,  TASK_THREAD_F9_F0(a0)
	fsd f10, TASK_THREAD_F10_F0(a0)
	fsd f11, TASK_THREAD_F11_F0(a0)
	fsd f12, TASK_THREAD_F12_F0(a0)
	fsd f13, TASK_THREAD_F13_F0(a0)
	fsd f14, TASK_THREAD_F14_F0(a0)
	fsd f15, TASK_THREAD_F15_F0(a0)
	fsd f16, TASK_THREAD_F16_F0(a0)
	fsd f17, TASK_THREAD_F17_F0(a0)
	fsd f18, TASK_THREAD_F18_F0(a0)
	fsd f19, TASK_THREAD_F19_F0(a0)
	fsd f20, TASK_THREAD_F20_F0(a0)
	fsd f21, TASK_THREAD_F21_F0(a0)
	fsd f22, TASK_THREAD_F22_F0(a0)
	fsd f23, TASK_THREAD_F23_F0(a0)
	fsd f24, TASK_THREAD_F24_F0(a0)
	fsd f25, TASK_THREAD_F25_F0(a0)
	fsd f26, TASK_THREAD_F26_F0(a0)
	fsd f27, TASK_THREAD_F27_F0(a0)
	fsd f28, TASK_THREAD_F28_F0(a0)
	fsd f29, TASK_THREAD_F29_F0(a0)
	fsd f30, TASK_THREAD_F30_F0(a0)
	fsd f31, TASK_THREAD_F31_F0(a0)
	sw t0, TASK_THREAD_FCSR_F0(a0)	/* store saved fcsr */
	csrc sstatus, t1		/* disable FP again */
	ret
ENDPROC(__fstate_save)
| 402 | |
/*
 * Restore the FPU state (f0-f31 and fcsr) of a task.
 *
 * a0: task_struct to restore from.  Mirror of __fstate_save: FS is set
 * in sstatus so the fld/fscsr instructions don't trap, then cleared.
 */
ENTRY(__fstate_restore)
	li  a2,  TASK_THREAD_F0
	add a0, a0, a2			/* a0 = &task->thread.fstate.f[0] */
	li t1, SR_FS
	lw t0, TASK_THREAD_FCSR_F0(a0)	/* load saved fcsr before enabling FP */
	csrs sstatus, t1		/* temporarily enable FP instructions */
	fld f0,  TASK_THREAD_F0_F0(a0)
	fld f1,  TASK_THREAD_F1_F0(a0)
	fld f2,  TASK_THREAD_F2_F0(a0)
	fld f3,  TASK_THREAD_F3_F0(a0)
	fld f4,  TASK_THREAD_F4_F0(a0)
	fld f5,  TASK_THREAD_F5_F0(a0)
	fld f6,  TASK_THREAD_F6_F0(a0)
	fld f7,  TASK_THREAD_F7_F0(a0)
	fld f8,  TASK_THREAD_F8_F0(a0)
	fld f9,  TASK_THREAD_F9_F0(a0)
	fld f10, TASK_THREAD_F10_F0(a0)
	fld f11, TASK_THREAD_F11_F0(a0)
	fld f12, TASK_THREAD_F12_F0(a0)
	fld f13, TASK_THREAD_F13_F0(a0)
	fld f14, TASK_THREAD_F14_F0(a0)
	fld f15, TASK_THREAD_F15_F0(a0)
	fld f16, TASK_THREAD_F16_F0(a0)
	fld f17, TASK_THREAD_F17_F0(a0)
	fld f18, TASK_THREAD_F18_F0(a0)
	fld f19, TASK_THREAD_F19_F0(a0)
	fld f20, TASK_THREAD_F20_F0(a0)
	fld f21, TASK_THREAD_F21_F0(a0)
	fld f22, TASK_THREAD_F22_F0(a0)
	fld f23, TASK_THREAD_F23_F0(a0)
	fld f24, TASK_THREAD_F24_F0(a0)
	fld f25, TASK_THREAD_F25_F0(a0)
	fld f26, TASK_THREAD_F26_F0(a0)
	fld f27, TASK_THREAD_F27_F0(a0)
	fld f28, TASK_THREAD_F28_F0(a0)
	fld f29, TASK_THREAD_F29_F0(a0)
	fld f30, TASK_THREAD_F30_F0(a0)
	fld f31, TASK_THREAD_F31_F0(a0)
	fscsr t0			/* restore fcsr */
	csrc sstatus, t1		/* disable FP again */
	ret
ENDPROC(__fstate_restore)
| 445 | |
| 446 | |
	.section ".rodata"
/*
 * Exception vector table, indexed by scause (synchronous exception code).
 * The entry order follows the RISC-V privileged-spec exception-code
 * encoding and must not be rearranged.  handle_exception indexes into
 * this table after scaling scause by the pointer size, and bounds-checks
 * against excp_vect_table_end.
 */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned	/*  0: instruction address misaligned */
	RISCV_PTR do_trap_insn_fault		/*  1: instruction access fault */
	RISCV_PTR do_trap_insn_illegal		/*  2: illegal instruction */
	RISCV_PTR do_trap_break			/*  3: breakpoint */
	RISCV_PTR do_trap_load_misaligned	/*  4: load address misaligned */
	RISCV_PTR do_trap_load_fault		/*  5: load access fault */
	RISCV_PTR do_trap_store_misaligned	/*  6: store/AMO address misaligned */
	RISCV_PTR do_trap_store_fault		/*  7: store/AMO access fault */
	RISCV_PTR do_trap_ecall_u		/*  8: system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s		/*  9: ecall from S-mode */
	RISCV_PTR do_trap_unknown		/* 10: reserved */
	RISCV_PTR do_trap_ecall_m		/* 11: ecall from M-mode */
	RISCV_PTR do_page_fault			/* 12: instruction page fault */
	RISCV_PTR do_page_fault			/* 13: load page fault */
	RISCV_PTR do_trap_unknown		/* 14: reserved */
	RISCV_PTR do_page_fault			/* 15: store page fault */
excp_vect_table_end:
END(excp_vect_table)