// SPDX-License-Identifier: GPL-2.0
/*
 * FPU signal frame handling routines.
 */

#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/pagemap.h>

#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/sigframe.h>
#include <asm/trace/fpu.h>

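/*
 * Boot-time templates for the software-reserved bytes of the fxsave area;
 * filled in by fpu__init_prepare_fx_sw_frame() below and copied into every
 * signal frame by save_xstate_epilog().
 */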
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;

/*
 * Check for the presence of extended state information in the
 * memory layout pointed to by the user fpstate pointer in the
 * sigcontext.
 */
static inline int check_for_xstate(struct fxregs_state __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct fxregs_state) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > fpu_user_xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of the second magic word at the end of
	 * the memory layout. This detects the case where the user just
	 * copied the legacy fpstate layout without copying the extended
	 * state information in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */
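/*
 * Write the legacy fsave header to the start of the 32-bit frame: with
 * FXSR the in-kernel fxsave image is converted to the i387 env layout
 * expected by userspace; otherwise only the status word is mirrored.
 */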
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		fpregs_lock();
		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
			copy_fxregs_to_kernel(&tsk->thread.fpu);
		fpregs_unlock();

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct fregs_state __user *fp = buf;
		u32 swd;

		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

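/*
 * Fill in the software-reserved bytes of the [f]xsave frame and, when
 * XSAVE is in use, terminate the frame with FP_XSTATE_MAGIC2 and force
 * the FP/SSE bits on in the user-visible xfeatures word (see below).
 */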
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *)(buf + fpu_user_xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffer.
	 */
	err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the
	 * bit vector while saving the state to the user context. This
	 * enables us to capture, during sigreturn, any changes to the
	 * FP/SSE bits made by legacy applications which don't touch the
	 * xfeatures field in the xsave header.
	 *
	 * xsave-aware apps can change the xfeatures in the xsave header
	 * as well as change any contents in the memory layout. XRSTOR
	 * as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	return err;
}

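/*
 * Dump the current FPU registers straight into the user buffer using the
 * best instruction available (XSAVE, FXSAVE or FNSAVE). If the save
 * fails, the buffer is cleared; -EFAULT is returned if even the clear
 * faults.
 */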
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
	int err;

	if (use_xsave())
		err = copy_xregs_to_user(buf);
	else if (use_fxsr())
		err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
	else
		err = copy_fregs_to_user((struct fregs_state __user *) buf);

	if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 * state is copied.
 * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 * buf == buf_fx for 64-bit frames and 32-bit fsave frames.
 * buf != buf_fx for 32-bit frames with fxstate.
 *
 * Try to save it directly to the user frame with the page fault handler
 * disabled. If this fails then do the slow path where the FPU state is
 * first saved to the task's fpu->state and then copied to the user frame
 * pointed to by the aligned pointer 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);
	int ret;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!access_ok(buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
				sizeof(struct user_i387_ia32_struct), NULL,
				(struct _fpstate_32 __user *) buf) ? -1 : 1;

retry:
	/*
	 * Load the FPU registers if they are not valid for the current task.
	 * With a valid FPU state we can attempt to save the state directly to
	 * userland's stack frame which will likely succeed. If it does not,
	 * resolve the fault in the user memory and try again.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		__fpregs_load_activate();

	pagefault_disable();
	ret = copy_fpregs_to_sigframe(buf_fx);
	pagefault_enable();
	fpregs_unlock();

	if (ret) {
		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
			goto retry;
		return -EFAULT;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}

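/*
 * Sanitize state restored from a signal frame before it is loaded into
 * the registers: limit the xfeatures bit vector to what the frame
 * declares (FP/SSE only for fx_only frames), mask reserved MXCSR bits,
 * and for 32-bit frames merge the legacy i387 env back into the fxsave
 * image.
 */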
static inline void
sanitize_restored_xstate(union fpregs_state *state,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xregs_state *xsave = &state->xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/*
		 * Note: we don't need to zero the reserved bits in the
		 * xstate_header here because we either didn't copy them at
		 * all, or we checked earlier that they aren't set.
		 */

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XFEATURE_MASK_FPSSE;
		else
			header->xfeatures &= xfeatures;
	}

	if (use_fxsr()) {
		/*
		 * MXCSR reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		if (ia32_env)
			convert_to_fxsr(&state->fxsave, ia32_env);
	}
}

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if (fx_only) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;

			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_fxregs(buf);
		} else {
			u64 init_bv = xfeatures_mask & ~xbv;

			if (unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_xregs(buf, xbv);
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else {
		return copy_user_to_fregs(buf);
	}
}

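/*
 * Restore FPU state from a signal frame. Where possible, the registers
 * are restored directly from user memory; if that faults, or for 32-bit
 * fxstate frames, the state is first copied into the task's fpu->state
 * with page faults enabled and then loaded from there.
 */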
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct user_i387_ia32_struct *envp = NULL;
	int state_size = fpu_kernel_xstate_size;
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	struct user_i387_ia32_struct env;
	u64 xfeatures = 0;
	int fx_only = 0;
	int ret = 0;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;

		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct fxregs_state);
			fx_only = 1;
			trace_x86_fpu_xstate_check_failed(fpu);
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	/*
	 * The current state of the FPU registers does not matter. By setting
	 * TIF_NEED_FPU_LOAD unconditionally it is ensured that our xstate is
	 * not modified on context switch and that the xstate is considered
	 * to be loaded again on return to userland (overriding last_cpu avoids
	 * the optimisation).
	 */
	set_thread_flag(TIF_NEED_FPU_LOAD);
	__fpu_invalidate_fpregs_state(fpu);

	if ((unsigned long)buf_fx % 64)
		fx_only = 1;

	/*
	 * For 32-bit frames with fxstate, copy the fxstate so it can be
	 * reconstructed later.
	 */
	if (ia32_fxstate) {
		ret = __copy_from_user(&env, buf, sizeof(env));
		if (ret)
			goto err_out;
		envp = &env;
	} else {
		/*
		 * Attempt to restore the FPU registers directly from user
		 * memory. For that to succeed, the user access cannot cause
		 * page faults. If it does, fall back to the slow path below,
		 * going through the kernel buffer with the pagefault handler
		 * enabled.
		 */
		fpregs_lock();
		pagefault_disable();
		ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
		pagefault_enable();
		if (!ret) {
			fpregs_mark_activate();
			fpregs_unlock();
			return 0;
		}
		fpregs_unlock();
	}

	if (use_xsave() && !fx_only) {
		u64 init_bv = xfeatures_mask & ~xfeatures;

		if (using_compacted_format()) {
			ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
		} else {
			ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);

			if (!ret && state_size > offsetof(struct xregs_state, header))
				ret = validate_xstate_header(&fpu->state.xsave.header);
		}
		if (ret)
			goto err_out;

		sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);

		fpregs_lock();
		if (unlikely(init_bv))
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
		ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);

	} else if (use_fxsr()) {
		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
		if (ret) {
			ret = -EFAULT;
			goto err_out;
		}

		sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);

		fpregs_lock();
		if (use_xsave()) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;

			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
		}

		ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
	} else {
		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
		if (ret)
			goto err_out;

		fpregs_lock();
		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
	}
	if (!ret)
		fpregs_mark_activate();
	fpregs_unlock();

err_out:
	if (ret)
		fpu__clear(fpu);
	return ret;
}

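/*
 * Size of the user-visible xstate image: the [f|fx|x]save area plus room
 * for FP_XSTATE_MAGIC2 when XSAVE is in use.
 */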
static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
			fpu_user_xstate_size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct fregs_state);
		size += sizeof(struct fregs_state);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}

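/*
 * Carve the math frame out of the signal stack: round 'sp' down to a
 * 64-byte boundary for the [f|fx|x]save area (returned in 'buf_fx') and
 * leave room for the fsave header below it on 32-bit fxsr frames.
 * Returns the new stack pointer and the total frame size in 'size'.
 */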
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct fregs_state);
		sp -= sizeof(struct fregs_state);
	}

	*size = frame_size;

	return sp;
}

/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This will be saved whenever the FP and extended state context is
 * saved on the user stack during signal delivery to the user.
 */
void fpu__init_prepare_fx_sw_frame(void)
{
	int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xfeatures = xfeatures_mask;
	fx_sw_reserved.xstate_size = fpu_user_xstate_size;

	if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
	    IS_ENABLED(CONFIG_X86_32)) {
		int fsave_header_size = sizeof(struct fregs_state);

		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
	}
}