// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

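/*
 * Lazily set up a task's FPU state: if the task has already used the FPU,
 * flush the live registers back to memory; otherwise allocate the xstate
 * area from task_xstate_cachep and reset the hardware or software FPU
 * state to FPSCR_INIT before marking the task as having used math.
 */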
int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Allocate memory for the FPU and other extended state on first use.
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	set_stopped_child_used_math(tsk);
	return 0;
}

#ifdef CONFIG_SH_FPU
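/*
 * Restore the current task's saved FPU registers, flag the thread as
 * actively using the FPU and bump its FPU usage counter.
 */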
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);

	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->thread.fpu_counter++;
}

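/*
 * Handle an FPU-disabled fault from user space: allocate and initialize
 * the FPU state on first use (killing the task if allocation fails),
 * grant the faulting context access to the FPU via grab_fpu() and then
 * restore the registers.
 */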
void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		return;
	}

	if (!tsk_used_math(tsk)) {
		int ret;
		/*
		 * init_fpu() does a slab allocation which can sleep,
		 * so run it with interrupts enabled.
		 */
		local_irq_enable();
		ret = init_fpu(tsk);
		local_irq_disable();
		if (ret) {
			/*
			 * Ran out of memory; kill the task.
			 */
			force_sig(SIGKILL);
			return;
		}
	}

	grab_fpu(regs);

	__fpu_state_restore();
}

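/*
 * Trap handler entry: forwards the trap's register frame to
 * fpu_state_restore().
 */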
BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;

	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */