// SPDX-License-Identifier: GPL-2.0
/*
 * FPU registers' regset abstraction, for ptrace, core dumps, etc.
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>
#include <linux/sched/task_stack.h>

/*
 * The xstateregs_active() routine is the same as regset_fpregs_active():
 * the "regset->n" of the xstate regset is already sized according to the
 * XSAVE feature capabilities of the CPU.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->initialized ? regset->n : 0;
}

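/*
 * Same as regset_fpregs_active(), but the FPU state is only reported as
 * present when the CPU also supports FXSR.
 */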
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
		return regset->n;
	else
		return 0;
}

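/*
 * Retrieve the FXSAVE-format FPU register state of the target task by
 * copying fpu->state.fxsave out to the caller's buffer.
 */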
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!boot_cpu_has(X86_FEATURE_FXSR))
		return -ENODEV;

	fpu__prepare_read(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

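/*
 * Update the target task's FXSAVE-format FPU register state from the
 * caller's buffer. Reserved MXCSR bits are cleared, and with XSAVE the
 * FP/SSE features are marked present in the xstate header.
 */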
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_FXSR))
		return -ENODEV;

	fpu__prepare_write(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

	return ret;
}

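/*
 * Retrieve the target task's full xstate image in standard (non-compacted)
 * XSAVE format, converting from the kernel's compacted format when that is
 * in use.
 */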
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *xsave;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return -ENODEV;

	xsave = &fpu->state.xsave;

	fpu__prepare_read(fpu);

	if (using_compacted_format()) {
		if (kbuf)
			ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
		else
			ret = copy_xstate_to_user(ubuf, xsave, pos, count);
	} else {
		fpstate_sanitize_xstate(fpu);
		/*
		 * Copy the 48 software-reserved bytes into the xsave area in
		 * the thread struct, so that the whole area can be copied to
		 * userspace with a single user_regset_copyout().
		 */
		memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

		/*
		 * Copy the xstate memory layout.
		 */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	}
	return ret;
}

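/*
 * Install a standard-format XSAVE image supplied by the caller into the
 * target task's xstate. The buffer must cover the whole user xstate image;
 * on failure the FPU state is reset to its init value.
 */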
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xregs_state *xsave;
	int ret;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return -ENODEV;

	/*
	 * A whole standard-format XSAVE buffer is needed:
	 */
	if ((pos != 0) || (count < fpu_user_xstate_size))
		return -EFAULT;

	xsave = &fpu->state.xsave;

	fpu__prepare_write(fpu);

	if (using_compacted_format()) {
		if (kbuf)
			ret = copy_kernel_to_xstate(xsave, kbuf);
		else
			ret = copy_user_to_xstate(xsave, ubuf);
	} else {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
		if (!ret)
			ret = validate_xstate_header(&xsave->header);
	}

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;

	/*
	 * In case of failure, mark all states as init:
	 */
	if (ret)
		fpstate_init(&fpu->state);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

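/*
 * Compress the 16-bit i387 tag word (two bits per register) into the 8-bit
 * FXSR tag word (one bit per register: 1 = valid, 0 = empty).
 */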
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

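/*
 * Expand the 8-bit FXSR tag word back into the 16-bit i387 tag word,
 * classifying each non-empty register as valid, zero or special from its
 * saved contents.
 */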
static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

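/*
 * Convert the task's FXSAVE image into the legacy 32-bit
 * user_i387_ia32_struct environment layout.
 */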
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should really be the ds/cs at the time of the FPU exception,
	 * but that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

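/*
 * Convert a legacy 32-bit user_i387_ia32_struct environment back into the
 * task's FXSAVE image.
 */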
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

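/*
 * Retrieve the legacy i387 (user_i387_ia32_struct) register state of the
 * target task, converting from the FXSAVE image when FXSR is available.
 */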
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__prepare_read(fpu);

	if (!boot_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!boot_cpu_has(X86_FEATURE_FXSR))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

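/*
 * Update the target task's FPU state from a legacy i387
 * (user_i387_ia32_struct) image, converting into the FXSAVE image when
 * FXSR is available.
 */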
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__prepare_write(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!boot_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!boot_cpu_has(X86_FEATURE_FXSR))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->initialized;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */