// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/mman.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/tlbflush.h>
#include <asm/cpufeature.h>
/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers",
	"SSE registers",
	"AVX registers",
	"MPX bounds registers",
	"MPX CSR",
	"AVX-512 opmask",
	"AVX-512 Hi256",
	"AVX-512 ZMM_Hi256",
	"Processor Trace (unused)",
	"Protection Keys User registers",
	"PASID state",
	"unknown xstate feature",
};

static short xsave_cpuid_features[] __initdata = {
	X86_FEATURE_FPU,
	X86_FEATURE_XMM,
	X86_FEATURE_AVX,
	X86_FEATURE_MPX,
	X86_FEATURE_MPX,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_INTEL_PT,
	X86_FEATURE_PKU,
	X86_FEATURE_ENQCMD,
};

/*
 * This represents the full set of bits that should ever be set in a kernel
 * XSAVE buffer, both supervisor and user xstates.
 */
u64 xfeatures_mask_all __read_mostly;

static unsigned int xstate_offsets[XFEATURE_MAX] = { [0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [0 ... XFEATURE_MAX - 1] = -1};

/*
 * The XSAVE area of the kernel can be in standard or compacted format;
 * it is always in standard format for user mode. This is the user
 * mode standard format size used for signal and ptrace frames.
 */
unsigned int fpu_user_xstate_size;

/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask_all;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * We use fls64() here so that we can print the most advanced
		 * feature that was requested but is missing. E.g. if a driver
		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we print
		 * the missing AVX feature - this is the most informative
		 * message for users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print) - 1;
		max_idx = ARRAY_SIZE(xfeature_names) - 1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
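
/*
 * Example usage (a hedged sketch; the calling context is hypothetical,
 * while cpu_has_xfeatures() and the XFEATURE_MASK_* macros are the API
 * defined above):
 *
 *	const char *name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &name))
 *		pr_warn("missing xstate feature: '%s'\n", name);
 */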

static bool xfeature_is_supervisor(int xfeature_nr)
{
	/*
	 * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
	 * return ECX[0] set to (1) for a supervisor state, and cleared (0)
	 * for a user state.
	 */
	u32 eax, ebx, ecx, edx;

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ecx & 1;
}

/*
 * When executing XSAVEOPT (or other optimized XSAVE instructions), if
 * a processor implementation detects that an FPU state component is still
 * (or is again) in its initialized state, it may clear the corresponding
 * bit in the header.xfeatures field, and can skip the writeout of registers
 * to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still
 * contain some previous, non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those
 * components, to always ensure that the memory layout of a feature will be
 * in the init state if the corresponding header bit is zero. This is to
 * ensure that user-space doesn't see some stale state in the memory layout
 * during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct fxregs_state *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * If all feature bits are set, no component is in its init state,
	 * so the memory layout is already up to date and there is nothing
	 * to do.
	 */
	if ((xfeatures & xfeatures_mask_all) == xfeatures_mask_all)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xfeatures & XFEATURE_MASK_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xfeatures & XFEATURE_MASK_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/*
	 * The first two features are FPU and SSE, which we already handled
	 * in a special way above:
	 */
	feature_bit = 0x2;
	xfeatures = (xfeatures_mask_user() & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_comp_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			memcpy((void *)fx + offset,
			       (void *)&init_fpstate.xsave + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}
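
/*
 * Illustration of the init optimization handled above (hypothetical
 * values): with xfeatures_mask_user() = 0x7 (FP | SSE | YMM), an
 * optimized XSAVE with every component in init state may leave
 * header.xfeatures = 0x0. Sanitizing then resets the FP/SSE legacy
 * areas explicitly and copies the YMM init state from init_fpstate,
 * so a subsequent ptrace read sees init values rather than stale data.
 */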

/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	u64 unsup_bits;

	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
		return;
	/*
	 * Unsupported supervisor xstates should not be found in
	 * the xfeatures mask.
	 */
	unsup_bits = xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
	WARN_ONCE(unsup_bits, "x86/fpu: Found unsupported supervisor xstates: 0x%llx\n",
		  unsup_bits);

	xfeatures_mask_all &= ~XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;

	cr4_set_bits(X86_CR4_OSXSAVE);

	/*
	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
	 * states can be set here.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());

	/*
	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_dynamic());
	}
}
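
/*
 * For example (hypothetical feature set): with PKU and ENQCMD both
 * enabled, the user PKRU component is enabled in XCR0 via xsetbv()
 * while the supervisor PASID component is enabled in IA32_XSS via
 * wrmsrl(); the user and supervisor masks never overlap.
 */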

static bool xfeature_enabled(enum xfeature xfeature)
{
	return xfeatures_mask_all & BIT_ULL(xfeature);
}

/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_features(void)
{
	u32 eax, ebx, ecx, edx, i;
	/* start at the beginning of the "extended state" */
	unsigned int last_good_offset = offsetof(struct xregs_state,
						 extended_state_area);
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_offsets[XFEATURE_FP] = 0;
	xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state,
					     xmm_space);

	xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
	xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
						  xmm_space);

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

		xstate_sizes[i] = eax;

		/*
		 * If an xfeature is a supervisor state, the offset in EBX
		 * is invalid; leave it at -1.
		 */
		if (xfeature_is_supervisor(i))
			continue;

		xstate_offsets[i] = ebx;

		/*
		 * In our xstate size checks, we assume that the highest-numbered
		 * xstate feature has the highest offset in the buffer. Ensure
		 * it does.
		 */
		WARN_ONCE(last_good_offset > xstate_offsets[i],
			  "x86/fpu: misordered xstate at %d\n", last_good_offset);

		last_good_offset = xstate_offsets[i];
	}
}

static void __init print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
}

/*
 * Print out all the supported xstate features:
 */
static void __init print_xstate_features(void)
{
	print_xstate_feature(XFEATURE_MASK_FP);
	print_xstate_feature(XFEATURE_MASK_SSE);
	print_xstate_feature(XFEATURE_MASK_YMM);
	print_xstate_feature(XFEATURE_MASK_BNDREGS);
	print_xstate_feature(XFEATURE_MASK_BNDCSR);
	print_xstate_feature(XFEATURE_MASK_OPMASK);
	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
	print_xstate_feature(XFEATURE_MASK_PKRU);
	print_xstate_feature(XFEATURE_MASK_PASID);
}

/*
 * This check is important because it is easy to get XSTATE_*
 * confused with XSTATE_BIT_*.
 */
#define CHECK_XFEATURE(nr) do {			\
	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
	WARN_ON(nr >= XFEATURE_MAX);		\
} while (0)
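
/*
 * For illustration (values from the xfeature enum): XFEATURE_YMM is the
 * state *number* 2, while XFEATURE_MASK_YMM is the *bit mask*
 * BIT_ULL(2) == 0x4. Passing the mask where a number is expected would
 * index the wrong component, which is what the check above catches.
 */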

/*
 * We could cache this like xstate_size[], but we only use
 * it here, so it would be a waste of space.
 */
static int xfeature_is_aligned(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);

	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
			  xfeature_nr);
		return 0;
	}

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	/*
	 * The value returned in ECX[1] indicates the alignment
	 * of state component 'xfeature_nr' when the compacted format
	 * of the extended region of an XSAVE area is used:
	 */
	return !!(ecx & 2);
}

/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 */
static void __init setup_xstate_comp_offsets(void)
{
	unsigned int next_offset;
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[XFEATURE_FP] = 0;
	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
						     xmm_space);

	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
			if (xfeature_enabled(i))
				xstate_comp_offsets[i] = xstate_offsets[i];
		}
		return;
	}

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_comp_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
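
/*
 * Worked example of the compacted layout above (hypothetical enabled
 * set): the extended area starts at FXSAVE_SIZE + XSAVE_HDR_SIZE =
 * 512 + 64 = 576. If YMM (256 bytes) is the first enabled extended
 * feature, xstate_comp_offsets[XFEATURE_YMM] = 576 and the following
 * feature starts at 832, rounded up to a 64-byte boundary if it sets
 * the alignment bit.
 */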

/*
 * Setup offsets of a supervisor-state-only XSAVES buffer:
 *
 * The offsets stored in xstate_comp_offsets[] only work for one specific
 * value of the Requested Feature BitMap (RFBM). In cases where a different
 * RFBM value is used, a different set of offsets is required. This set of
 * offsets is for when RFBM=xfeatures_mask_supervisor().
 */
static void __init setup_supervisor_only_offsets(void)
{
	unsigned int next_offset;
	int i;

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
			continue;

		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_supervisor_only_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}

/*
 * Print out xstate component offsets and sizes
 */
static void __init print_xstate_offset_size(void)
{
	int i;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;
		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
			i, xstate_comp_offsets[i], i, xstate_sizes[i]);
	}
}

/*
 * All supported features have either init state all zeros or are
 * handled in setup_init_fpu_buf() individually. This is an explicit
 * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
 * newly added supported features at build time and make people
 * actually look at the init state for the new feature.
 */
#define XFEATURES_INIT_FPSTATE_HANDLED		\
	(XFEATURE_MASK_FP |			\
	 XFEATURE_MASK_SSE |			\
	 XFEATURE_MASK_YMM |			\
	 XFEATURE_MASK_OPMASK |			\
	 XFEATURE_MASK_ZMM_Hi256 |		\
	 XFEATURE_MASK_Hi16_ZMM |		\
	 XFEATURE_MASK_PKRU |			\
	 XFEATURE_MASK_BNDREGS |		\
	 XFEATURE_MASK_BNDCSR |			\
	 XFEATURE_MASK_PASID)
| 462 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 463 | * setup the xstate image representing the init state |
| 464 | */ |
| 465 | static void __init setup_init_fpu_buf(void) |
| 466 | { |
| 467 | static int on_boot_cpu __initdata = 1; |
| 468 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 469 | BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED | |
| 470 | XFEATURE_MASK_SUPERVISOR_SUPPORTED) != |
| 471 | XFEATURES_INIT_FPSTATE_HANDLED); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 472 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 473 | WARN_ON_FPU(!on_boot_cpu); |
| 474 | on_boot_cpu = 0; |
| 475 | |
| 476 | if (!boot_cpu_has(X86_FEATURE_XSAVE)) |
| 477 | return; |
| 478 | |
| 479 | setup_xstate_features(); |
| 480 | print_xstate_features(); |
| 481 | |
| 482 | if (boot_cpu_has(X86_FEATURE_XSAVES)) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 483 | init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | |
| 484 | xfeatures_mask_all; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 485 | |
| 486 | /* |
| 487 | * Init all the features state with header.xfeatures being 0x0 |
| 488 | */ |
| 489 | copy_kernel_to_xregs_booting(&init_fpstate.xsave); |
| 490 | |
| 491 | /* |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 492 | * All components are now in init state. Read the state back so |
| 493 | * that init_fpstate contains all non-zero init state. This only |
| 494 | * works with XSAVE, but not with XSAVEOPT and XSAVES because |
| 495 | * those use the init optimization which skips writing data for |
| 496 | * components in init state. |
| 497 | * |
| 498 | * XSAVE could be used, but that would require to reshuffle the |
| 499 | * data when XSAVES is available because XSAVES uses xstate |
| 500 | * compaction. But doing so is a pointless exercise because most |
| 501 | * components have an all zeros init state except for the legacy |
| 502 | * ones (FP and SSE). Those can be saved with FXSAVE into the |
| 503 | * legacy area. Adding new features requires to ensure that init |
| 504 | * state is all zeroes or if not to add the necessary handling |
| 505 | * here. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 506 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 507 | fxsave(&init_fpstate.fxsave); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 508 | } |

static int xfeature_uncompacted_offset(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	/*
	 * Only XSAVES supports supervisor states and it uses compacted
	 * format. Checking a supervisor state's uncompacted offset is
	 * an error.
	 */
	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
		return -1;
	}

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ebx;
}

int xfeature_size(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return eax;
}

/*
 * 'XSAVES' implies two different things:
 * 1. saving of supervisor/system state
 * 2. using the compacted format
 *
 * Use this function when dealing with the compacted format so
 * that it is obvious which aspect of 'XSAVES' is being handled
 * by the calling code.
 */
int using_compacted_format(void)
{
	return boot_cpu_has(X86_FEATURE_XSAVES);
}

/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_user_xstate_header(const struct xstate_header *hdr)
{
	/* No unknown or supervisor features may be set */
	if (hdr->xfeatures & ~xfeatures_mask_user())
		return -EINVAL;

	/* Userspace must use the uncompacted format */
	if (hdr->xcomp_bv)
		return -EINVAL;

	/*
	 * If 'reserved' is shrunk to add a new field, make sure to validate
	 * that new field here!
	 */
	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);

	/* No reserved bits may be set */
	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	return 0;
}
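
/*
 * For example (hypothetical input): a ptrace or sigreturn frame that
 * sets a supervisor bit such as XFEATURE_MASK_PASID in hdr->xfeatures,
 * or any nonzero hdr->xcomp_bv, is rejected above with -EINVAL before
 * any state is restored.
 */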

static void __xstate_dump_leaves(void)
{
	int i;
	u32 eax, ebx, ecx, edx;
	static int should_dump = 1;

	if (!should_dump)
		return;
	should_dump = 0;
	/*
	 * Dump out a few leaves past the ones that we support
	 * just in case there are some goodies up there
	 */
	for (i = 0; i < XFEATURE_MAX + 10; i++) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
			XSTATE_CPUID, i, eax, ebx, ecx, edx);
	}
}

#define XSTATE_WARN_ON(x) do {							\
	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
		__xstate_dump_leaves();						\
	}									\
} while (0)

#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
	if ((nr == nr_macro) &&						\
	    WARN_ONCE(sz != sizeof(__struct),				\
		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
		__stringify(nr_macro), sizeof(__struct), sz)) {		\
		__xstate_dump_leaves();					\
	}								\
} while (0)

/*
 * We have a C struct for each 'xstate'. We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static void check_xstate_against_struct(int nr)
{
	/*
	 * Ask the CPU for the size of the state.
	 */
	int sz = xfeature_size(nr);
	/*
	 * Match each CPU state with the corresponding software
	 * structure.
	 */
	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
	XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);

	/*
	 * Make *SURE* to add any feature numbers in below if
	 * there are "holes" in the xsave state component
	 * numbers.
	 */
	if ((nr < XFEATURE_YMM) ||
	    (nr >= XFEATURE_MAX) ||
	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
		XSTATE_WARN_ON(1);
	}
}

/*
 * This essentially double-checks what the cpu told us about
 * how large the XSAVE buffer needs to be. We are recalculating
 * it to be safe.
 *
 * Dynamic XSAVE features allocate their own buffers and are not
 * covered by these checks. Only the size of the buffer for task->fpu
 * is checked here.
 */
static void do_extra_xstate_size_checks(void)
{
	int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		check_xstate_against_struct(i);
		/*
		 * Supervisor state components can be managed only by
		 * XSAVES, which is compacted-format only.
		 */
		if (!using_compacted_format())
			XSTATE_WARN_ON(xfeature_is_supervisor(i));

		/* Align from the end of the previous feature */
		if (xfeature_is_aligned(i))
			paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
		/*
		 * The offset of a given state in the non-compacted
		 * format is given to us in a CPUID leaf. We check
		 * them for being ordered (increasing offsets) in
		 * setup_xstate_features().
		 */
		if (!using_compacted_format())
			paranoid_xstate_size = xfeature_uncompacted_offset(i);
		/*
		 * The compacted-format offset always depends on where
		 * the previous state ended.
		 */
		paranoid_xstate_size += xfeature_size(i);
	}
	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
}


/*
 * Get total size of enabled xstates in XCR0 | IA32_XSS.
 *
 * Note the SDM's wording here. "sub-function 0" only enumerates
 * the size of the *user* states. If we use it to size a buffer
 * that we use 'XSAVES' on, we could potentially overflow the
 * buffer because 'XSAVES' saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 1:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVES instruction for an XSAVE area
	 *    containing all the state components
	 *    corresponding to bits currently set in
	 *    XCR0 | IA32_XSS.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	return ebx;
}

/*
 * Get the total size of the enabled xstates without the dynamic supervisor
 * features.
 */
static unsigned int __init get_xsaves_size_no_dynamic(void)
{
	u64 mask = xfeatures_mask_dynamic();
	unsigned int size;

	if (!mask)
		return get_xsaves_size();

	/* Disable dynamic features. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());

	/*
	 * Ask the hardware what size is required of the buffer.
	 * This is the size required for the task->fpu buffer.
	 */
	size = get_xsaves_size();

	/* Re-enable dynamic features so XSAVES will work on them again. */
	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);

	return size;
}

static unsigned int __init get_xsave_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 0:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVE instruction for an XSAVE area
	 *    containing all the *user* state components
	 *    corresponding to bits currently set in XCR0.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	return ebx;
}

/*
 * Will the runtime-enumerated 'xstate_size' fit in the init
 * task's statically-allocated buffer?
 */
static bool is_supported_xstate_size(unsigned int test_xstate_size)
{
	if (test_xstate_size <= sizeof(union fpregs_state))
		return true;

	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
		sizeof(union fpregs_state), test_xstate_size);
	return false;
}

static int __init init_xstate_size(void)
{
	/* Recompute the context size for enabled features: */
	unsigned int possible_xstate_size;
	unsigned int xsave_size;

	xsave_size = get_xsave_size();

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		possible_xstate_size = get_xsaves_size_no_dynamic();
	else
		possible_xstate_size = xsave_size;

	/* Ensure we have the space to store all enabled features: */
	if (!is_supported_xstate_size(possible_xstate_size))
		return -EINVAL;

	/*
	 * The size is OK, we are definitely going to use xsave,
	 * make it known to the world that we need more space.
	 */
	fpu_kernel_xstate_size = possible_xstate_size;
	do_extra_xstate_size_checks();

	/*
	 * User space is always in standard format.
	 */
	fpu_user_xstate_size = xsave_size;
	return 0;
}

/*
 * We enabled the XSAVE hardware, but something went wrong and
 * we cannot use it. Disable it.
 */
static void fpu__init_disable_system_xstate(void)
{
	xfeatures_mask_all = 0;
	cr4_clear_bits(X86_CR4_OSXSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
}

/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 */
void __init fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;
	static int on_boot_cpu __initdata = 1;
	int err;
	int i;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_info("x86/fpu: No FPU detected\n");
		return;
	}

	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		pr_info("x86/fpu: x87 FPU will use %s\n",
			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN_ON_FPU(1);
		return;
	}

	/*
	 * Find user xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask_all = eax + ((u64)edx << 32);

	/*
	 * Find supervisor xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	xfeatures_mask_all |= ecx + ((u64)edx << 32);

	if ((xfeatures_mask_user() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		/*
		 * This indicates that something really unexpected happened
		 * with the enumeration. Disable XSAVE and try to continue
		 * booting without it. This is too early to BUG().
		 */
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
		       xfeatures_mask_all);
		goto out_disable;
	}

	/*
	 * Clear XSAVE features that are disabled in the normal CPUID.
	 */
	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
		if (!boot_cpu_has(xsave_cpuid_features[i]))
			xfeatures_mask_all &= ~BIT_ULL(i);
	}

	xfeatures_mask_all &= fpu__get_supported_xfeatures_mask();

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
	err = init_xstate_size();
	if (err)
		goto out_disable;

	/*
	 * Update info used for ptrace frames; use standard-format size and no
	 * supervisor xstates:
	 */
	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_user());

	fpu__init_prepare_fx_sw_frame();
	setup_init_fpu_buf();
	setup_xstate_comp_offsets();
	setup_supervisor_only_offsets();
	print_xstate_offset_size();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
		xfeatures_mask_all,
		fpu_kernel_xstate_size,
		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
	return;

out_disable:
	/* something went wrong, try to boot without any XSAVE support */
	fpu__init_disable_system_xstate();
}

/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());

	/*
	 * Restore IA32_XSS. The same CPUID bit enumerates support
	 * of XSAVES and MSR_IA32_XSS.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
				     xfeatures_mask_dynamic());
	}
}

/*
 * Given an xstate feature nr, calculate where in the xsave
 * buffer the state is. Callers should ensure that the buffer
 * is valid.
 */
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ON_FPU(1);
		return NULL;
	}

	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
}
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Note that if there is no data for the field in the xsave buffer
 * this will return NULL.
 *
 * Inputs:
 *	xstate: the thread's storage area for all FPU data
 *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area, or NULL if the
 *	field is not present in the xsave buffer.
 */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	/*
	 * Do we even *have* xsave state?
	 */
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return NULL;

	/*
	 * We should not ever be requesting features that we
	 * have not enabled.
	 */
	WARN_ONCE(!(xfeatures_mask_all & BIT_ULL(xfeature_nr)),
		  "get of unsupported state");
	/*
	 * This assumes the last 'xsave*' instruction to
	 * have requested that 'xfeature_nr' be saved.
	 * If it did not, we might be seeing an old value
	 * of the field in the buffer.
	 *
	 * This can happen because the last 'xsave' did not
	 * request that this feature be saved (unlikely)
	 * or because the "init optimization" caused it
	 * to not be saved.
	 */
	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
		return NULL;

	return __raw_xsave_addr(xsave, xfeature_nr);
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
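
/*
 * Example usage (a hedged sketch; the surrounding context is
 * hypothetical, while get_xsave_addr() and struct pkru_state are the
 * real API and type used elsewhere in the kernel):
 *
 *	struct pkru_state *pk;
 *
 *	pk = get_xsave_addr(&fpu->state.xsave, XFEATURE_PKRU);
 *	if (pk)
 *		pr_info("PKRU: 0x%x\n", pk->pkru);
 *
 * A NULL return means PKRU is in its init state or not enabled.
 */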

/*
 * This wraps up the common operations that need to occur when retrieving
 * data from xsave state. It first ensures that the current task was
 * using the FPU and retrieves the data into a buffer. It then calculates
 * the offset of the requested field in the buffer.
 *
 * This function is safe to call whether the FPU is in use or not.
 *
 * Note that this only works on the current task.
 *
 * Inputs:
 *	@xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area or NULL if the state
 *	is not present or is in its 'init state'.
 */
const void *get_xsave_field_ptr(int xfeature_nr)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * fpu__save() takes the CPU's xstate registers
	 * and saves them off to the 'fpu' memory buffer.
	 */
	fpu__save(fpu);

	return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

/*
 * This will go out and modify the PKRU register to set the access
 * rights for @pkey to @init_val.
 */
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
			      unsigned long init_val)
{
	u32 old_pkru;
	int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
	u32 new_pkru_bits = 0;

	/*
	 * This check implies XSAVE support. OSPKE only gets
	 * set if we enable XSAVE and we enable PKU in XCR0.
	 */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return -EINVAL;

	/*
	 * This code should only be called with valid 'pkey'
	 * values originating from in-kernel users. Complain
	 * if a bad value is observed.
	 */
	WARN_ON_ONCE(pkey >= arch_max_pkey());

	/* Set the bits we need in PKRU: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_pkru_bits |= PKRU_AD_BIT;
	if (init_val & PKEY_DISABLE_WRITE)
		new_pkru_bits |= PKRU_WD_BIT;

	/* Shift the bits into the correct place in PKRU for pkey: */
	new_pkru_bits <<= pkey_shift;

	/* Get old PKRU and mask off any old bits in place: */
	old_pkru = read_pkru();
	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);

	/* Write old part along with new part: */
	write_pkru(old_pkru | new_pkru_bits);

	return 0;
}
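
/*
 * Worked example of the PKRU update above (hypothetical values): for
 * pkey = 2 and init_val = PKEY_DISABLE_WRITE, pkey_shift = 4 and
 * new_pkru_bits = PKRU_WD_BIT << 4 = 0x20. The old AD/WD bits for
 * pkey 2 (mask 0x30) are cleared from the previous PKRU value and 0x20
 * is OR'd back in, leaving all other pkeys' permissions untouched.
 */
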
#endif /* CONFIG_ARCH_HAS_PKEYS */

/*
 * Weird legacy quirk: SSE and YMM states store information in the
 * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
 * area is marked as unused in the xfeatures header, we need to copy
 * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
 */
static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
{
	if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
		return false;

	if (xfeatures & XFEATURE_MASK_FP)
		return false;

	return true;
}
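
/*
 * Truth table for the quirk above: MXCSR/MXCSR_FLAGS need a separate
 * copy exactly when SSE or YMM is in use but FP is not:
 *
 *	FP	SSE|YMM		separate MXCSR copy?
 *	0	0		no
 *	0	1		yes (the quirk)
 *	1	any		no (the FP area copy covers it)
 */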

static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
			 void *init_xstate, unsigned int size)
{
	membuf_write(to, from_xstate ? xstate : init_xstate, size);
}

/*
 * Convert from kernel XSAVES compacted format to standard format and copy
 * to a kernel-space ptrace buffer.
 *
 * It supports partial copy but pos always starts from zero. This is called
 * from xstateregs_get() and there we check the CPU has XSAVES.
 */
void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
{
	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
	struct xregs_state *xinit = &init_fpstate.xsave;
	struct xstate_header header;
	unsigned int zerofrom;
	int i;

	/*
	 * The destination is a ptrace buffer; we put in only user xstates:
	 */
	memset(&header, 0, sizeof(header));
	header.xfeatures = xsave->header.xfeatures;
	header.xfeatures &= xfeatures_mask_user();

	/* Copy FP state up to MXCSR */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
		     &xinit->i387, off_mxcsr);

	/* Copy MXCSR when SSE or YMM are set in the feature mask */
	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
		     MXCSR_AND_FLAGS_SIZE);

	/* Copy the remaining FP state */
	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
		     sizeof(xsave->i387.st_space));

	/* Copy the SSE state - shared with YMM, but independently managed */
	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
		     sizeof(xsave->i387.xmm_space));

	/* Zero the padding area */
	membuf_zero(&to, sizeof(xsave->i387.padding));

	/* Copy xsave->i387.sw_reserved */
	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));

	/* Copy the user space relevant state of @xsave->header */
	membuf_write(&to, &header, sizeof(header));

	zerofrom = offsetof(struct xregs_state, extended_state_area);

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		/*
		 * The ptrace buffer is in non-compacted XSAVE format.
		 * In non-compacted format disabled features still occupy
		 * state space, but there is no state to copy from in the
		 * compacted init_fpstate. The gap tracking will zero this
		 * later.
		 */
		if (!(xfeatures_mask_user() & BIT_ULL(i)))
			continue;

		/*
		 * If there was a feature or alignment gap, zero the space
		 * in the destination buffer.
		 */
		if (zerofrom < xstate_offsets[i])
			membuf_zero(&to, xstate_offsets[i] - zerofrom);

		copy_feature(header.xfeatures & BIT_ULL(i), &to,
			     __raw_xsave_addr(xsave, i),
			     __raw_xsave_addr(xinit, i),
			     xstate_sizes[i]);

		/*
		 * Keep track of the last copied state in the non-compacted
		 * target buffer for gap zeroing.
		 */
		zerofrom = xstate_offsets[i] + xstate_sizes[i];
	}

	if (to.left)
		membuf_zero(&to, to.left);
}
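
/*
 * Illustration of the gap zeroing above (hypothetical feature set): if
 * YMM is enabled but MPX is not, the non-compacted target buffer still
 * reserves space at the MPX offsets, while the compacted init_fpstate
 * has nothing to copy from. The zerofrom tracking fills that hole with
 * zeroes before the next enabled feature is written.
 */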

/*
 * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
 * and copy to the target thread. This is called from xstateregs_set().
 */
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	memcpy(&hdr, kbuf + offset, size);

	if (validate_user_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			memcpy(dst, kbuf + offset, size);
		}
	}

	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
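
/*
 * Minimal usage sketch (hypothetical caller, not from this file): a
 * ptrace-style path hands copy_kernel_to_xstate() a standard-format
 * buffer of fpu_user_xstate_size bytes that has already been copied
 * into kernel memory.
 */
static int __maybe_unused example_xstateregs_set(struct fpu *fpu,
						 const void *kbuf)
{
	return copy_kernel_to_xstate(&fpu->state.xsave, kbuf);
}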

/*
 * Convert from a ptrace or sigreturn standard-format user-space buffer to
 * kernel XSAVES format and copy to the target thread. This is called from
 * xstateregs_set(), as well as potentially from the sigreturn() and
 * rt_sigreturn() system calls.
 */
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	if (__copy_from_user(&hdr, ubuf + offset, size))
		return -EFAULT;

	if (validate_user_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			if (__copy_from_user(dst, ubuf + offset, size))
				return -EFAULT;
		}
	}

	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
			return -EFAULT;
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
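
/*
 * Hedged sketch (hypothetical caller): a sigreturn-style path passes
 * the user pointer straight through.  copy_user_to_xstate() uses the
 * unchecked __copy_from_user(), so the caller is assumed to have
 * validated access to the whole buffer first.
 */
static int __maybe_unused example_restore_from_sigframe(struct fpu *fpu,
					const void __user *ubuf)
{
	if (!access_ok(ubuf, fpu_user_xstate_size))
		return -EFAULT;

	return copy_user_to_xstate(&fpu->state.xsave, ubuf);
}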

/*
 * Save only supervisor states to the kernel buffer. This blows away all
 * old states, and is intended to be used only in __fpu__restore_sig(), where
 * user states are restored from the user buffer.
 */
void copy_supervisor_to_kernel(struct xregs_state *xstate)
{
	struct xstate_header *header;
	u64 max_bit, min_bit;
	u32 lmask, hmask;
	int err, i;

	if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
		return;

	if (!xfeatures_mask_supervisor())
		return;

	max_bit = __fls(xfeatures_mask_supervisor());
	min_bit = __ffs(xfeatures_mask_supervisor());

	lmask = xfeatures_mask_supervisor();
	hmask = xfeatures_mask_supervisor() >> 32;
	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	if (WARN_ON_FPU(err))
		return;

	/*
	 * At this point, the buffer has only supervisor states and must be
	 * converted back to normal kernel format.
	 */
	header = &xstate->header;
	header->xcomp_bv |= xfeatures_mask_all;

	/*
	 * This only moves states up in the buffer. Start with
	 * the last state and move backwards so that states are
	 * not overwritten until after they are moved. Note:
	 * memmove() allows overlapping src/dst buffers.
	 */
	for (i = max_bit; i >= min_bit; i--) {
		u8 *xbuf = (u8 *)xstate;

		if (!((header->xfeatures >> i) & 1))
			continue;

		/* Move xfeature 'i' into its normal location */
		memmove(xbuf + xstate_comp_offsets[i],
			xbuf + xstate_supervisor_only_offsets[i],
			xstate_sizes[i]);
	}
}
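
/*
 * Worked example with made-up offsets: suppose supervisor feature i
 * lands at offset 0x240 in the supervisor-only XSAVES layout, but at
 * offset 0x680 once the (absent) user states are accounted for in the
 * full compacted layout.  The loop above then performs the equivalent
 * of:
 *
 *	memmove(xbuf + 0x680, xbuf + 0x240, xstate_sizes[i]);
 *
 * Walking from the highest set bit downwards guarantees a destination
 * range is never clobbered before its own contents have been moved.
 */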

/**
 * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
 *                                       an xsave area
 * @xstate: A pointer to an xsave area
 * @mask: The dynamic supervisor features saved into the xsave area
 *
 * Only the dynamic supervisor states set in the mask are saved into the xsave
 * area (see the comment for XFEATURE_MASK_DYNAMIC for details on dynamic
 * supervisor features). Besides the dynamic supervisor states, the legacy
 * region and XSAVE header are also saved into the xsave area. The supervisor
 * features in XFEATURE_MASK_SUPERVISOR_SUPPORTED and
 * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
 *
 * The xsave area must be 64-byte aligned.
 */
void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
{
	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
	u32 lmask, hmask;
	int err;

	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
		return;

	if (WARN_ON_FPU(!dynamic_mask))
		return;

	lmask = dynamic_mask;
	hmask = dynamic_mask >> 32;

	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);

	/* Should never fault when copying to a kernel buffer */
	WARN_ON_FPU(err);
}
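
/*
 * Usage sketch (hypothetical caller): in this kernel generation the
 * only dynamic supervisor state is arch LBR, so a perf-style context
 * switch save would look roughly like this.  It is assumed that the
 * caller allocated a 64-byte-aligned buffer sized for LBR state and
 * that XFEATURE_MASK_LBR is enabled in IA32_XSS.
 */
static void __maybe_unused example_save_lbr_on_switch(struct xregs_state *buf)
{
	copy_dynamic_supervisor_to_kernel(buf, XFEATURE_MASK_LBR);
}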

/**
 * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
 *                                       an xsave area
 * @xstate: A pointer to an xsave area
 * @mask: The dynamic supervisor features restored from the xsave area
 *
 * Only the dynamic supervisor states set in the mask are restored from the
 * xsave area (see the comment for XFEATURE_MASK_DYNAMIC for details on
 * dynamic supervisor features). Besides the dynamic supervisor states, the
 * legacy region and XSAVE header are also restored from the xsave area. The
 * supervisor features in XFEATURE_MASK_SUPERVISOR_SUPPORTED and
 * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
 *
 * The xsave area must be 64-byte aligned.
 */
void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
{
	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
	u32 lmask, hmask;
	int err;

	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
		return;

	if (WARN_ON_FPU(!dynamic_mask))
		return;

	lmask = dynamic_mask;
	hmask = dynamic_mask >> 32;

	XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);

	/* Should never fault when copying from a kernel buffer */
	WARN_ON_FPU(err);
}
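
/*
 * Worked example: XSTATE_OP() takes the 64-bit feature mask split into
 * the EDX:EAX register pair.  For a hypothetical dynamic mask of
 * (1ULL << 15) | (1ULL << 34):
 *
 *	lmask = 0x00008000;	low  32 bits, loaded into EAX
 *	hmask = 0x00000004;	high 32 bits, loaded into EDX
 */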

#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
 * Report the amount of time, in milliseconds, that has elapsed since
 * the task last used AVX512.
 */
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
	long delta;

	if (!timestamp) {
		/*
		 * Report -1 if no AVX512 usage
		 */
		delta = -1;
	} else {
		delta = (long)(jiffies - timestamp);
		/*
		 * Cap to LONG_MAX if time difference > LONG_MAX
		 */
		if (delta < 0)
			delta = LONG_MAX;
		delta = jiffies_to_msecs(delta);
	}

	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
	seq_putc(m, '\n');
}
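
/*
 * Example of the resulting /proc/<pid>/arch_status line (the value is
 * illustrative):
 *
 *	AVX512_elapsed_ms:	120
 *
 * A value of -1 means the task has never used AVX512.
 */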

/*
 * Report architecture-specific information
 */
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
			 struct pid *pid, struct task_struct *task)
{
	/*
	 * Report AVX512 state if both the processor and the kernel build
	 * support it.
	 */
	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
		avx512_status(m, task);

	return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */