// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *    - Rafael R. Reilova (moved everything from head.S),
 *      <rreilova@ececs.uc.edu>
 *    - Channing Corn (tests & fixes),
 *    - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology_early();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it is not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

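/* Enable SSBD via the AMD specific interface: the virtualized MSR when available, else LS_CFG. */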
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
        SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]    = "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]  = "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_PRCTL]   = "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
        const char *option;
        enum spectre_v2_user_cmd cmd;
        bool secure;
} v2_user_options[] __initdata = {
        { "auto",         SPECTRE_V2_USER_CMD_AUTO,         false },
        { "off",          SPECTRE_V2_USER_CMD_NONE,         false },
        { "on",           SPECTRE_V2_USER_CMD_FORCE,        true  },
        { "prctl",        SPECTRE_V2_USER_CMD_PRCTL,        false },
        { "prctl,ibpb",   SPECTRE_V2_USER_CMD_PRCTL_IBPB,   false },
        { "seccomp",      SPECTRE_V2_USER_CMD_SECCOMP,      false },
        { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /* If enhanced IBRS is enabled, no STIBP is required */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        /*
         * If SMT is not possible or STIBP is not available, clear the STIBP
         * mode.
         */
        if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
                mode = SPECTRE_V2_USER_NONE;
set_mode:
        spectre_v2_user = mode;
        /* Only print the STIBP mode when SMT possible */
        if (smt_possible)
                pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]              = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]     = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]     = "Mitigation: Enhanced IBRS",
};

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] __initdata = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
511 pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *    - RSB underflow (and switch to BTB) on Skylake+
         *    - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);

        /* Enable STIBP if appropriate */
        arch_smt_update();
}

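/* IPI callback: write the updated x86_spec_ctrl_base into this CPU's MSR. */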
static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}

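/* Recompute the STIBP mitigation state after a change in the SMT state. */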
void arch_smt_update(void)
{
        /* Enhanced IBRS implies STIBP. No update required. */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        mutex_lock(&spec_ctrl_mutex);

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]    = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]   = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initdata = {
        { "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt) "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return 0;
                /*
                 * Indirect branch speculation is always disabled in strict
                 * mode.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
                        return -EPERM;
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return -EPERM;
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
                        return 0;
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_STRICT:
                return PR_SPEC_DISABLE;
        default:
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
}

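/* Propagate the boot CPU's speculation control settings to a CPU being brought up. */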
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44 bits internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                        half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]         = "auto",
        [VMENTER_L1D_FLUSH_NEVER]        = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]         = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]       = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

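/* Build the STIBP part of the spectre_v2 sysfs reporting string. */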
static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}

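/* Build the IBPB part of the spectre_v2 sysfs reporting string. */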
static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}

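/* Common backend for the cpu_show_*() sysfs vulnerability reports below. */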
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;
        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif