/*
 * Common signal handling code for both 32 and 64 bits
 *
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 * Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <asm/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>

#include "signal.h"

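/*
 * With CONFIG_VSX, each VSR overlaps an FPR in its high doubleword, and the
 * low doublewords are kept interleaved in fp_state. The helpers below
 * gather that interleaved layout into a flat local buffer (and scatter it
 * back) so the signal frame sees the conventional register images.
 */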
#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the FPRs and FPSCR from the thread_struct into a local
	 * buffer, then write the whole buffer to user memory */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the low halves of the VSRs from the thread_struct into a
	 * local buffer, then write the buffer to user memory */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

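/*
 * The ck* variants below are the same helpers operating on the
 * checkpointed register state (ckfp_state) that a transaction saved at
 * tbegin time, rather than on the live fp_state.
 */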
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the checkpointed FPRs and FPSCR from the thread_struct into
	 * a local buffer, then write the buffer to user memory */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the low halves of the checkpointed VSRs from the
	 * thread_struct into a local buffer, then write it to user memory */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
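/*
 * Without CONFIG_VSX there is no interleaving: the FPRs (with the FPSCR
 * immediately after them in fp_state) are contiguous, so each copy is a
 * single bulk __copy_{to,from}_user() covering all ELF_NFPREG slots.
 */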
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif /* CONFIG_VSX */

/*
 * Log an error when sending an unhandled signal to a process. Controlled
 * through the debug.exception-trace sysctl.
 */

int show_unhandled_signals = 1;

/*
 * Allocate space for the signal frame
 */
void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
			  size_t frame_size, int is_32)
{
	unsigned long oldsp, newsp;

	/* Default to using normal stack */
	oldsp = get_clean_sp(sp, is_32);
	oldsp = sigsp(oldsp, ksig);
	/* keep the frame 16-byte aligned, as the ABI requires */
	newsp = (oldsp - frame_size) & ~0xFUL;

	/* Check access */
	if (!access_ok((void __user *)newsp, oldsp - newsp))
		return NULL;

	return (void __user *)newsp;
}

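/*
 * Decide whether an interrupted system call should be restarted once the
 * signal has been dealt with, following the -ERESTART* convention: the
 * interrupted syscall leaves a restart code in the return-value register,
 * and this routine either rewinds NIP so the syscall is re-executed or
 * converts the code into a plain -EINTR for the handler to observe.
 */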
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (!trap_is_syscall(regs))
		return;

	if (trap_norestart(regs))
		return;

	/* error signalled ? */
	if (trap_is_scv(regs)) {
		/* 32-bit compat mode sign extend? */
		if (!IS_ERR_VALUE(ret))
			return;
		ret = -ret;
	} else if (!(regs->ccr & 0x10000000)) {
		return;
	}

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we don't restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* rewind NIP one instruction to re-execute the syscall */
		regs->nip -= 4;
		regs->result = 0;
	} else {
		if (trap_is_scv(regs)) {
			regs->result = -EINTR;
			regs->gpr[3] = -EINTR;
		} else {
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			regs->ccr |= 0x10000000;
		}
	}
}
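
/*
 * Userspace-visible effect of the ERESTARTSYS case above -- a minimal
 * illustrative sketch, not kernel code:
 *
 *	struct sigaction sa = { .sa_handler = handler,
 *				.sa_flags = SA_RESTART };
 *	sigaction(SIGUSR1, &sa, NULL);
 *	n = read(fd, buf, len);	// restarted if SIGUSR1 arrives
 *
 * Without SA_RESTART the interrupted read() instead fails with
 * errno == EINTR.
 */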
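/*
 * Deliver one pending signal to the current task: pick the signal, sort
 * out syscall restart, then build the 32-bit or 64-bit signal frame.
 */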
static void do_signal(struct task_struct *tsk)
{
	sigset_t *oldset = sigmask_to_save();
	struct ksignal ksig = { .sig = 0 };
	int ret;

	BUG_ON(tsk != current);

	get_signal(&ksig);

	/* Is there any syscall restart business here? */
	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);

	if (ksig.sig <= 0) {
		/* No signal to deliver -- put the saved sigmask back */
		restore_saved_sigmask();
		set_trap_norestart(tsk->thread.regs);
		return;		/* no signals delivered */
	}

	/*
	 * Re-enable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
				__set_breakpoint(i, &tsk->thread.hw_brk[i]);
		}
	}

	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(tsk, tsk->thread.regs);

	rseq_signal_deliver(&ksig, tsk->thread.regs);

	if (is_32bit_task()) {
		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(&ksig, oldset, tsk);
		else
			ret = handle_signal32(&ksig, oldset, tsk);
	} else {
		ret = handle_rt_signal64(&ksig, oldset, tsk);
	}

	set_trap_norestart(tsk->thread.regs);
	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}

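/*
 * Called on return to user mode when TIF work is pending: handles uprobes,
 * live-patch transitions, pending signals and NOTIFY_RESUME callbacks.
 */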
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_PATCH_PENDING)
		klp_update_patch_state(current);

	if (thread_info_flags & _TIF_SIGPENDING) {
		BUG_ON(regs != current->thread.regs);
		do_signal(current);
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	user_enter();
}

unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack. It's possible that the stack has moved back
	 * up after the tbegin. The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend. In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non-transactionally or in suspend, we are in
	 * trouble because if we get a TM abort, the program counter and stack
	 * pointer will be back at the tbegin but our in-memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state. This ensures that the signal context
	 * (written TM suspended) will be written below the stack required for
	 * the rollback. The transaction is aborted because of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

	unsigned long ret = tsk->thread.regs->gpr[1];

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BUG_ON(tsk != current);

	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
		preempt_disable();
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
			ret = tsk->thread.ckpt_regs.gpr[1];

		/*
		 * If we treclaim, we must clear the current thread's TM bits
		 * before re-enabling preemption. Otherwise we might be
		 * preempted and have the live MSR[TS] changed behind our back
		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
		 * enter the signal handler in non-transactional state.
		 */
		tsk->thread.regs->msr &= ~MSR_TS_MASK;
		preempt_enable();
	}
#endif
	return ret;
}