// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>

#include "cpu.h"

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void write_spec_ctrl_current(u64 val, bool force)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS is in use, this MSR is written on return-to-user;
	 * unless forced, the update can be delayed until that time.
	 */
	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed-size MTRRs in there, and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/*
 * NOTE: For VMX, this function is not called in the vmexit path.
 * It uses vmx_spec_ctrl_restore_host() instead.
 */
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
	struct thread_info *ti = current_thread_info();

	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

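/*
 * Force SSBD on AMD parts that lack SPEC_CTRL-based SSBD: prefer the
 * virtualized MSR interface when available, otherwise fall back to the
 * family-specific LS_CFG bit cached at boot in x86_amd_ls_cfg_ssbd_mask.
 */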
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt) "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

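/* Parse the "mds=" early parameter: off, full, or full,nosmt. */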
static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

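/*
 * Select the TAA mitigation. TSX already being disabled (tsx=off) is itself
 * a complete mitigation; otherwise fall back to VERW-based buffer clearing,
 * which shares the mds_user_clear static branch with MDS.
 */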
static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}

#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

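/*
 * Program the RNGDS_MITG_DIS bit in MSR_IA32_MCU_OPT_CTRL on this CPU to
 * match the selected SRBDS mitigation state.
 */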
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

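/*
 * Pick the SRBDS mitigation state; the MSR itself is programmed by
 * update_srbds_msr().
 */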
static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
};

const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

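/* Parse the comma-separated list of "retbleed=" options. */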
static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

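/*
 * Select the RETBleed mitigation: UNRET or IBPB on AMD/Hygon; on Intel the
 * IBRS/eIBRS choice already made by spectre_v2_select_mitigation() is
 * reported instead.
 */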
static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
	default:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		mitigate_smt = true;
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

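/* Compare a length-delimited cmdline argument against a NUL-terminated option. */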
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char *option;
	enum spectre_v2_user_cmd cmd;
	bool secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

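/*
 * Resolve the "spectre_v2_user=" selection; a spectre_v2= choice of NONE or
 * FORCE overrides it.
 */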
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_IBRS ||
	       mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
	 * STIBP is not required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

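/*
 * Parse "spectre_v2=" and sanity-check the selection against kernel config
 * and CPU feature support, falling back to AUTO on any mismatch.
 */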
| 1189 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
| 1190 | { |
| 1191 | enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; |
| 1192 | char arg[20]; |
| 1193 | int ret, i; |
| 1194 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1195 | if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || |
| 1196 | cpu_mitigations_off()) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1197 | return SPECTRE_V2_CMD_NONE; |
| 1198 | |
| 1199 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); |
| 1200 | if (ret < 0) |
| 1201 | return SPECTRE_V2_CMD_AUTO; |
| 1202 | |
| 1203 | for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { |
| 1204 | if (!match_option(arg, ret, mitigation_options[i].option)) |
| 1205 | continue; |
| 1206 | cmd = mitigation_options[i].cmd; |
| 1207 | break; |
| 1208 | } |
| 1209 | |
| 1210 | if (i >= ARRAY_SIZE(mitigation_options)) { |
| 1211 | pr_err("unknown option (%s). Switching to AUTO select\n", arg); |
| 1212 | return SPECTRE_V2_CMD_AUTO; |
| 1213 | } |
| 1214 | |
| 1215 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE || |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1216 | cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
| 1217 | cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || |
| 1218 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || |
| 1219 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1220 | !IS_ENABLED(CONFIG_RETPOLINE)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1221 | pr_err("%s selected but not compiled in. Switching to AUTO select\n", |
| 1222 | mitigation_options[i].option); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1223 | return SPECTRE_V2_CMD_AUTO; |
| 1224 | } |
| 1225 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1226 | if ((cmd == SPECTRE_V2_CMD_EIBRS || |
| 1227 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || |
| 1228 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && |
| 1229 | !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { |
| 1230 | pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n", |
| 1231 | mitigation_options[i].option); |
| 1232 | return SPECTRE_V2_CMD_AUTO; |
| 1233 | } |
| 1234 | |
| 1235 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
| 1236 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && |
| 1237 | !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { |
| 1238 | pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", |
| 1239 | mitigation_options[i].option); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1240 | return SPECTRE_V2_CMD_AUTO; |
| 1241 | } |
| 1242 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1243 | if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { |
| 1244 | pr_err("%s selected but not compiled in. Switching to AUTO select\n", |
| 1245 | mitigation_options[i].option); |
| 1246 | return SPECTRE_V2_CMD_AUTO; |
| 1247 | } |
| 1248 | |
| 1249 | if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { |
| 1250 | pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", |
| 1251 | mitigation_options[i].option); |
| 1252 | return SPECTRE_V2_CMD_AUTO; |
| 1253 | } |
| 1254 | |
| 1255 | if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { |
| 1256 | pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", |
| 1257 | mitigation_options[i].option); |
| 1258 | return SPECTRE_V2_CMD_AUTO; |
| 1259 | } |
| 1260 | |
| 1261 | if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { |
| 1262 | pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", |
| 1263 | mitigation_options[i].option); |
| 1264 | return SPECTRE_V2_CMD_AUTO; |
| 1265 | } |
| 1266 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1267 | spec_v2_print_cond(mitigation_options[i].option, |
| 1268 | mitigation_options[i].secure); |
| 1269 | return cmd; |
| 1270 | } |
| 1271 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1272 | static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) |
| 1273 | { |
| 1274 | if (!IS_ENABLED(CONFIG_RETPOLINE)) { |
| 1275 | pr_err("Kernel not compiled with retpoline; no mitigation available!\n"); |
| 1276 | return SPECTRE_V2_NONE; |
| 1277 | } |
| 1278 | |
| 1279 | return SPECTRE_V2_RETPOLINE; |
| 1280 | } |
| 1281 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1282 | /* Disable in-kernel use of non-RSB RET predictors */ |
| 1283 | static void __init spec_ctrl_disable_kernel_rrsba(void) |
| 1284 | { |
| 1285 | u64 ia32_cap; |
| 1286 | |
| 1287 | if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) |
| 1288 | return; |
| 1289 | |
| 1290 | ia32_cap = x86_read_arch_cap_msr(); |
| 1291 | |
| 1292 | if (ia32_cap & ARCH_CAP_RRSBA) { |
| 1293 | x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; |
| 1294 | write_spec_ctrl_current(x86_spec_ctrl_base, true); |
| 1295 | } |
| 1296 | } |
| 1297 | |
| 1298 | static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) |
| 1299 | { |
| 1300 | /* |
| 1301 | * Similar to context switches, there are two types of RSB attacks |
| 1302 | * after VM exit: |
| 1303 | * |
| 1304 | * 1) RSB underflow |
| 1305 | * |
| 1306 | * 2) Poisoned RSB entry |
| 1307 | * |
| 1308 | * When retpoline is enabled, both are mitigated by filling/clearing |
| 1309 | * the RSB. |
| 1310 | * |
| 1311 | * When IBRS is enabled, while #1 would be mitigated by the IBRS branch |
| 1312 | * prediction isolation protections, RSB still needs to be cleared |
| 1313 | * because of #2. Note that SMEP provides no protection here, unlike |
| 1314 | * the case of user-space-poisoned RSB entries. |
| 1315 | * |
| 1316 | * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB |
| 1317 | * bug is present then a LITE version of RSB protection is required: |
| 1318 | * just a single CALL needs to retire before a RET is executed. |
| 1319 | */ |
| 1320 | switch (mode) { |
| 1321 | case SPECTRE_V2_NONE: |
| 1322 | return; |
| 1323 | |
| 1324 | case SPECTRE_V2_EIBRS_LFENCE: |
| 1325 | case SPECTRE_V2_EIBRS: |
| 1326 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
| 1327 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); |
| 1328 | pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); |
| 1329 | } |
| 1330 | return; |
| 1331 | |
| 1332 | case SPECTRE_V2_EIBRS_RETPOLINE: |
| 1333 | case SPECTRE_V2_RETPOLINE: |
| 1334 | case SPECTRE_V2_LFENCE: |
| 1335 | case SPECTRE_V2_IBRS: |
| 1336 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); |
| 1337 | pr_info("Spectre v2 / SpectreRSB: Filling RSB on VMEXIT\n"); |
| 1338 | return; |
| 1339 | } |
| 1340 | |
| 1341 | pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit\n"); |
| 1342 | dump_stack(); |
| 1343 | } |
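| | /* |
| |  * The feature bits forced above are consumed by the VM-exit assembly |
| |  * paths: X86_FEATURE_RSB_VMEXIT stuffs all 32 RSB entries, while the |
| |  * LITE variant only needs a single CALL to retire before the first RET, |
| |  * since eIBRS already prevents RSB poisoning and only the PBRSB window |
| |  * has to be closed. A simplified sketch of the stuffing loop itself is |
| |  * given next to the context-switch case below. |
| |  */ |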
| 1344 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1345 | static void __init spectre_v2_select_mitigation(void) |
| 1346 | { |
| 1347 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); |
| 1348 | enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; |
| 1349 | |
| 1350 | /* |
| 1351 | * If the CPU is not affected and the command line mode is NONE or AUTO |
| 1352 | * then there is nothing to do. |
| 1353 | */ |
| 1354 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && |
| 1355 | (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) |
| 1356 | return; |
| 1357 | |
| 1358 | switch (cmd) { |
| 1359 | case SPECTRE_V2_CMD_NONE: |
| 1360 | return; |
| 1361 | |
| 1362 | case SPECTRE_V2_CMD_FORCE: |
| 1363 | case SPECTRE_V2_CMD_AUTO: |
| 1364 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1365 | mode = SPECTRE_V2_EIBRS; |
| 1366 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1367 | } |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1368 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1369 | if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && |
| 1370 | boot_cpu_has_bug(X86_BUG_RETBLEED) && |
| 1371 | retbleed_cmd != RETBLEED_CMD_OFF && |
| 1372 | boot_cpu_has(X86_FEATURE_IBRS) && |
| 1373 | boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { |
| 1374 | mode = SPECTRE_V2_IBRS; |
| 1375 | break; |
| 1376 | } |
| 1377 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1378 | mode = spectre_v2_select_retpoline(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1379 | break; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1380 | |
| 1381 | case SPECTRE_V2_CMD_RETPOLINE_LFENCE: |
| 1382 | pr_err(SPECTRE_V2_LFENCE_MSG); |
| 1383 | mode = SPECTRE_V2_LFENCE; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1384 | break; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1385 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1386 | case SPECTRE_V2_CMD_RETPOLINE_GENERIC: |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1387 | mode = SPECTRE_V2_RETPOLINE; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1388 | break; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1389 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1390 | case SPECTRE_V2_CMD_RETPOLINE: |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1391 | mode = spectre_v2_select_retpoline(); |
| 1392 | break; |
| 1393 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1394 | case SPECTRE_V2_CMD_IBRS: |
| 1395 | mode = SPECTRE_V2_IBRS; |
| 1396 | break; |
| 1397 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1398 | case SPECTRE_V2_CMD_EIBRS: |
| 1399 | mode = SPECTRE_V2_EIBRS; |
| 1400 | break; |
| 1401 | |
| 1402 | case SPECTRE_V2_CMD_EIBRS_LFENCE: |
| 1403 | mode = SPECTRE_V2_EIBRS_LFENCE; |
| 1404 | break; |
| 1405 | |
| 1406 | case SPECTRE_V2_CMD_EIBRS_RETPOLINE: |
| 1407 | mode = SPECTRE_V2_EIBRS_RETPOLINE; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1408 | break; |
| 1409 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1410 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1411 | if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
| 1412 | pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); |
| 1413 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1414 | if (spectre_v2_in_ibrs_mode(mode)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1415 | x86_spec_ctrl_base |= SPEC_CTRL_IBRS; |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1416 | write_spec_ctrl_current(x86_spec_ctrl_base, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1417 | } |
| 1418 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1419 | switch (mode) { |
| 1420 | case SPECTRE_V2_NONE: |
| 1421 | case SPECTRE_V2_EIBRS: |
| 1422 | break; |
| 1423 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1424 | case SPECTRE_V2_IBRS: |
| 1425 | setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); |
| 1426 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) |
| 1427 | pr_warn(SPECTRE_V2_IBRS_PERF_MSG); |
| 1428 | break; |
| 1429 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1430 | case SPECTRE_V2_LFENCE: |
| 1431 | case SPECTRE_V2_EIBRS_LFENCE: |
| 1432 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); |
| 1433 | fallthrough; |
| 1434 | |
| 1435 | case SPECTRE_V2_RETPOLINE: |
| 1436 | case SPECTRE_V2_EIBRS_RETPOLINE: |
| 1437 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); |
| 1438 | break; |
| 1439 | } |
| 1440 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1441 | /* |
| 1442 | * Disable alternate RSB predictions in the kernel when indirect CALLs |
| 1443 | * and JMPs get protection against BHI and Intramode-BTI, but RET |
| 1444 | * prediction from a non-RSB predictor is still a risk. |
| 1445 | */ |
| 1446 | if (mode == SPECTRE_V2_EIBRS_LFENCE || |
| 1447 | mode == SPECTRE_V2_EIBRS_RETPOLINE || |
| 1448 | mode == SPECTRE_V2_RETPOLINE) |
| 1449 | spec_ctrl_disable_kernel_rrsba(); |
| 1450 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1451 | spectre_v2_enabled = mode; |
| 1452 | pr_info("%s\n", spectre_v2_strings[mode]); |
| 1453 | |
| 1454 | /* |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1455 | * If Spectre v2 protection has been enabled, fill the RSB during a |
| 1456 | * context switch. In general there are two types of RSB attacks |
| 1457 | * across context switches, where CALLs and RETs may be unbalanced. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1458 | * |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1459 | * 1) RSB underflow |
| 1460 | * |
| 1461 | * Some Intel parts have "bottomless RSB". When the RSB is empty, |
| 1462 | * speculated return targets may come from the branch predictor, |
| 1463 | * which could have a user-poisoned BTB or BHB entry. |
| 1464 | * |
| 1465 | * AMD has it even worse: *all* returns are speculated from the BTB, |
| 1466 | * regardless of the state of the RSB. |
| 1467 | * |
| 1468 | * When IBRS or eIBRS is enabled, the "user -> kernel" attack |
| 1469 | * scenario is mitigated by the IBRS branch prediction isolation |
| 1470 | * properties, so RSB filling wouldn't be necessary to |
| 1471 | * protect against this type of attack. |
| 1472 | * |
| 1473 | * The "user -> user" attack scenario is mitigated by RSB filling. |
| 1474 | * |
| 1475 | * 2) Poisoned RSB entry |
| 1476 | * |
| 1477 | * If the 'next' in-kernel return stack is shorter than 'prev', |
| 1478 | * 'next' could be tricked into speculating with a user-poisoned RSB |
| 1479 | * entry. |
| 1480 | * |
| 1481 | * The "user -> kernel" attack scenario is mitigated by SMEP and |
| 1482 | * eIBRS. |
| 1483 | * |
| 1484 | * The "user -> user" scenario, also known as SpectreBHB, requires |
| 1485 | * RSB clearing. |
| 1486 | * |
| 1487 | * So to mitigate all cases, unconditionally fill RSB on context |
| 1488 | * switches. |
| 1489 | * |
| 1490 | * FIXME: Is this pointless for retbleed-affected AMD? |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1491 | */ |
| 1492 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
| 1493 | pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); |
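| | /* |
| |  * A simplified sketch (not the exact kernel asm) of one iteration of |
| |  * the FILL_RETURN_BUFFER stuffing sequence gated on |
| |  * X86_FEATURE_RSB_CTXSW: |
| |  * |
| |  *	call	2f		// push the address of label 1 into the RSB |
| |  * 1:	pause			// speculation trap: reached only by a |
| |  *	lfence			// mispredicted RET consuming the entry |
| |  *	jmp	1b |
| |  * 2:	add	$8, %rsp	// drop the architectural return address |
| |  * |
| |  * Thirty-two such CALLs leave every RSB entry pointing at a harmless |
| |  * trap. |
| |  */ |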
| 1494 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1495 | spectre_v2_determine_rsb_fill_type_at_vmexit(mode); |
| 1496 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1497 | /* |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1498 | * Retpoline protects the kernel, but doesn't protect firmware. IBRS |
| 1499 | * and Enhanced IBRS protect firmware too, so enable IBRS around |
| 1500 | * firmware calls only when IBRS / Enhanced IBRS aren't otherwise |
| 1501 | * enabled. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1502 | * |
| 1503 | * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because |
| 1504 | * the user might select retpoline on the kernel command line and if |
| 1505 | * the CPU supports Enhanced IBRS, the kernel might unintentionally not |
| 1506 | * enable IBRS around firmware calls. |
| 1507 | */ |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1508 | if (boot_cpu_has_bug(X86_BUG_RETBLEED) && |
| 1509 | boot_cpu_has(X86_FEATURE_IBPB) && |
| 1510 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
| 1511 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { |
| 1512 | |
| 1513 | if (retbleed_cmd != RETBLEED_CMD_IBPB) { |
| 1514 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); |
| 1515 | pr_info("Enabling Speculation Barrier for firmware calls\n"); |
| 1516 | } |
| 1517 | |
| 1518 | } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1519 | setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); |
| 1520 | pr_info("Enabling Restricted Speculation for firmware calls\n"); |
| 1521 | } |
| 1522 | |
| 1523 | /* Set up IBPB and STIBP depending on the general spectre V2 command */ |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1524 | spectre_v2_cmd = cmd; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1525 | } |
| 1526 | |
| 1527 | static void update_stibp_msr(void * __unused) |
| 1528 | { |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1529 | u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); |
| 1530 | write_spec_ctrl_current(val, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1531 | } |
| 1532 | |
| 1533 | /* Update x86_spec_ctrl_base in case SMT state changed. */ |
| 1534 | static void update_stibp_strict(void) |
| 1535 | { |
| 1536 | u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; |
| 1537 | |
| 1538 | if (sched_smt_active()) |
| 1539 | mask |= SPEC_CTRL_STIBP; |
| 1540 | |
| 1541 | if (mask == x86_spec_ctrl_base) |
| 1542 | return; |
| 1543 | |
| 1544 | pr_info("Update user space SMT mitigation: STIBP %s\n", |
| 1545 | mask & SPEC_CTRL_STIBP ? "always-on" : "off"); |
| 1546 | x86_spec_ctrl_base = mask; |
| 1547 | on_each_cpu(update_stibp_msr, NULL, 1); |
| 1548 | } |
| 1549 | |
| 1550 | /* Update the static key controlling the evaluation of TIF_SPEC_IB */ |
| 1551 | static void update_indir_branch_cond(void) |
| 1552 | { |
| 1553 | if (sched_smt_active()) |
| 1554 | static_branch_enable(&switch_to_cond_stibp); |
| 1555 | else |
| 1556 | static_branch_disable(&switch_to_cond_stibp); |
| 1557 | } |
| 1558 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1559 | #undef pr_fmt |
| 1560 | #define pr_fmt(fmt) fmt |
| 1561 | |
| 1562 | /* Update the static key controlling the MDS CPU buffer clear in idle */ |
| 1563 | static void update_mds_branch_idle(void) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1564 | { |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1565 | u64 ia32_cap = x86_read_arch_cap_msr(); |
| 1566 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1567 | /* |
| 1568 | * Enable the idle clearing if SMT is active on CPUs which are |
| 1569 | * affected only by MSBDS and not any other MDS variant. |
| 1570 | * |
| 1571 | * The other variants cannot be mitigated when SMT is enabled, so |
| 1572 | * clearing the buffers on idle just to prevent the Store Buffer |
| 1573 | * repartitioning leak would be a window dressing exercise. |
| 1574 | */ |
| 1575 | if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1576 | return; |
| 1577 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1578 | if (sched_smt_active()) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1579 | static_branch_enable(&mds_idle_clear); |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1580 | } else if (mmio_mitigation == MMIO_MITIGATION_OFF || |
| 1581 | (ia32_cap & ARCH_CAP_FBSDP_NO)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1582 | static_branch_disable(&mds_idle_clear); |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1583 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1584 | } |
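| | /* |
| |  * The key is evaluated on the idle entry paths; a sketch of the helper |
| |  * in <asm/nospec-branch.h> that consumes it: |
| |  * |
| |  *	static inline void mds_idle_clear_cpu_buffers(void) |
| |  *	{ |
| |  *		if (static_branch_likely(&mds_idle_clear)) |
| |  *			mds_clear_cpu_buffers();	// flushes via VERW |
| |  *	} |
| |  */ |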
| 1585 | |
| 1586 | #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" |
| 1587 | #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1588 | #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1589 | |
| 1590 | void cpu_bugs_smt_update(void) |
| 1591 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1592 | mutex_lock(&spec_ctrl_mutex); |
| 1593 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1594 | if (sched_smt_active() && unprivileged_ebpf_enabled() && |
| 1595 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) |
| 1596 | pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); |
| 1597 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1598 | switch (spectre_v2_user_stibp) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1599 | case SPECTRE_V2_USER_NONE: |
| 1600 | break; |
| 1601 | case SPECTRE_V2_USER_STRICT: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1602 | case SPECTRE_V2_USER_STRICT_PREFERRED: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1603 | update_stibp_strict(); |
| 1604 | break; |
| 1605 | case SPECTRE_V2_USER_PRCTL: |
| 1606 | case SPECTRE_V2_USER_SECCOMP: |
| 1607 | update_indir_branch_cond(); |
| 1608 | break; |
| 1609 | } |
| 1610 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1611 | switch (mds_mitigation) { |
| 1612 | case MDS_MITIGATION_FULL: |
| 1613 | case MDS_MITIGATION_VMWERV: |
| 1614 | if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) |
| 1615 | pr_warn_once(MDS_MSG_SMT); |
| 1616 | update_mds_branch_idle(); |
| 1617 | break; |
| 1618 | case MDS_MITIGATION_OFF: |
| 1619 | break; |
| 1620 | } |
| 1621 | |
| 1622 | switch (taa_mitigation) { |
| 1623 | case TAA_MITIGATION_VERW: |
| 1624 | case TAA_MITIGATION_UCODE_NEEDED: |
| 1625 | if (sched_smt_active()) |
| 1626 | pr_warn_once(TAA_MSG_SMT); |
| 1627 | break; |
| 1628 | case TAA_MITIGATION_TSX_DISABLED: |
| 1629 | case TAA_MITIGATION_OFF: |
| 1630 | break; |
| 1631 | } |
| 1632 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1633 | switch (mmio_mitigation) { |
| 1634 | case MMIO_MITIGATION_VERW: |
| 1635 | case MMIO_MITIGATION_UCODE_NEEDED: |
| 1636 | if (sched_smt_active()) |
| 1637 | pr_warn_once(MMIO_MSG_SMT); |
| 1638 | break; |
| 1639 | case MMIO_MITIGATION_OFF: |
| 1640 | break; |
| 1641 | } |
| 1642 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1643 | mutex_unlock(&spec_ctrl_mutex); |
| 1644 | } |
| 1645 | |
| 1646 | #undef pr_fmt |
| 1647 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt |
| 1648 | |
| 1649 | static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; |
| 1650 | |
| 1651 | /* The kernel command line selection */ |
| 1652 | enum ssb_mitigation_cmd { |
| 1653 | SPEC_STORE_BYPASS_CMD_NONE, |
| 1654 | SPEC_STORE_BYPASS_CMD_AUTO, |
| 1655 | SPEC_STORE_BYPASS_CMD_ON, |
| 1656 | SPEC_STORE_BYPASS_CMD_PRCTL, |
| 1657 | SPEC_STORE_BYPASS_CMD_SECCOMP, |
| 1658 | }; |
| 1659 | |
| 1660 | static const char * const ssb_strings[] = { |
| 1661 | [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
| 1662 | [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", |
| 1663 | [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", |
| 1664 | [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", |
| 1665 | }; |
| 1666 | |
| 1667 | static const struct { |
| 1668 | const char *option; |
| 1669 | enum ssb_mitigation_cmd cmd; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1670 | } ssb_mitigation_options[] __initconst = { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1671 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ |
| 1672 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ |
| 1673 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ |
| 1674 | { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ |
| 1675 | { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ |
| 1676 | }; |
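| | /* |
| |  * Example kernel command lines matching the table above (parsed by |
| |  * ssb_parse_cmdline() below): |
| |  * |
| |  *	spec_store_bypass_disable=prctl |
| |  *	spec_store_bypass_disable=seccomp |
| |  *	nospec_store_bypass_disable	// shorthand for "off" |
| |  */ |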
| 1677 | |
| 1678 | static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) |
| 1679 | { |
| 1680 | enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; |
| 1681 | char arg[20]; |
| 1682 | int ret, i; |
| 1683 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1684 | if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || |
| 1685 | cpu_mitigations_off()) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1686 | return SPEC_STORE_BYPASS_CMD_NONE; |
| 1687 | } else { |
| 1688 | ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", |
| 1689 | arg, sizeof(arg)); |
| 1690 | if (ret < 0) |
| 1691 | return SPEC_STORE_BYPASS_CMD_AUTO; |
| 1692 | |
| 1693 | for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { |
| 1694 | if (!match_option(arg, ret, ssb_mitigation_options[i].option)) |
| 1695 | continue; |
| 1696 | |
| 1697 | cmd = ssb_mitigation_options[i].cmd; |
| 1698 | break; |
| 1699 | } |
| 1700 | |
| 1701 | if (i >= ARRAY_SIZE(ssb_mitigation_options)) { |
| 1702 | pr_err("unknown option (%s). Switching to AUTO select\n", arg); |
| 1703 | return SPEC_STORE_BYPASS_CMD_AUTO; |
| 1704 | } |
| 1705 | } |
| 1706 | |
| 1707 | return cmd; |
| 1708 | } |
| 1709 | |
| 1710 | static enum ssb_mitigation __init __ssb_select_mitigation(void) |
| 1711 | { |
| 1712 | enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; |
| 1713 | enum ssb_mitigation_cmd cmd; |
| 1714 | |
| 1715 | if (!boot_cpu_has(X86_FEATURE_SSBD)) |
| 1716 | return mode; |
| 1717 | |
| 1718 | cmd = ssb_parse_cmdline(); |
| 1719 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && |
| 1720 | (cmd == SPEC_STORE_BYPASS_CMD_NONE || |
| 1721 | cmd == SPEC_STORE_BYPASS_CMD_AUTO)) |
| 1722 | return mode; |
| 1723 | |
| 1724 | switch (cmd) { |
| 1725 | case SPEC_STORE_BYPASS_CMD_AUTO: |
| 1726 | case SPEC_STORE_BYPASS_CMD_SECCOMP: |
| 1727 | /* |
| 1728 | * Choose prctl+seccomp as the default mode if seccomp is |
| 1729 | * enabled. |
| 1730 | */ |
| 1731 | if (IS_ENABLED(CONFIG_SECCOMP)) |
| 1732 | mode = SPEC_STORE_BYPASS_SECCOMP; |
| 1733 | else |
| 1734 | mode = SPEC_STORE_BYPASS_PRCTL; |
| 1735 | break; |
| 1736 | case SPEC_STORE_BYPASS_CMD_ON: |
| 1737 | mode = SPEC_STORE_BYPASS_DISABLE; |
| 1738 | break; |
| 1739 | case SPEC_STORE_BYPASS_CMD_PRCTL: |
| 1740 | mode = SPEC_STORE_BYPASS_PRCTL; |
| 1741 | break; |
| 1742 | case SPEC_STORE_BYPASS_CMD_NONE: |
| 1743 | break; |
| 1744 | } |
| 1745 | |
| 1746 | /* |
| 1747 | * We have three CPU feature flags that are in play here: |
| 1748 | * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. |
| 1749 | * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass |
| 1750 | * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
| 1751 | */ |
| 1752 | if (mode == SPEC_STORE_BYPASS_DISABLE) { |
| 1753 | setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
| 1754 | /* |
| 1755 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may |
| 1756 | * use a completely different MSR and bit dependent on family. |
| 1757 | */ |
| 1758 | if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && |
| 1759 | !static_cpu_has(X86_FEATURE_AMD_SSBD)) { |
| 1760 | x86_amd_ssb_disable(); |
| 1761 | } else { |
| 1762 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1763 | write_spec_ctrl_current(x86_spec_ctrl_base, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1764 | } |
| 1765 | } |
| 1766 | |
| 1767 | return mode; |
| 1768 | } |
| 1769 | |
| 1770 | static void ssb_select_mitigation(void) |
| 1771 | { |
| 1772 | ssb_mode = __ssb_select_mitigation(); |
| 1773 | |
| 1774 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
| 1775 | pr_info("%s\n", ssb_strings[ssb_mode]); |
| 1776 | } |
| 1777 | |
| 1778 | #undef pr_fmt |
| 1779 | #define pr_fmt(fmt) "Speculation prctl: " fmt |
| 1780 | |
| 1781 | static void task_update_spec_tif(struct task_struct *tsk) |
| 1782 | { |
| 1783 | /* Force the update of the real TIF bits */ |
| 1784 | set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); |
| 1785 | |
| 1786 | /* |
| 1787 | * Immediately update the speculation control MSRs for the current |
| 1788 | * task, but for a non-current task delay setting the CPU |
| 1789 | * mitigation until it is scheduled next. |
| 1790 | * |
| 1791 | * This can only happen for SECCOMP mitigation. For PRCTL it's |
| 1792 | * always the current task. |
| 1793 | */ |
| 1794 | if (tsk == current) |
| 1795 | speculation_ctrl_update_current(); |
| 1796 | } |
| 1797 | |
| 1798 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
| 1799 | { |
| 1800 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && |
| 1801 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) |
| 1802 | return -ENXIO; |
| 1803 | |
| 1804 | switch (ctrl) { |
| 1805 | case PR_SPEC_ENABLE: |
| 1806 | /* If speculation is force disabled, enable is not allowed */ |
| 1807 | if (task_spec_ssb_force_disable(task)) |
| 1808 | return -EPERM; |
| 1809 | task_clear_spec_ssb_disable(task); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1810 | task_clear_spec_ssb_noexec(task); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1811 | task_update_spec_tif(task); |
| 1812 | break; |
| 1813 | case PR_SPEC_DISABLE: |
| 1814 | task_set_spec_ssb_disable(task); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1815 | task_clear_spec_ssb_noexec(task); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1816 | task_update_spec_tif(task); |
| 1817 | break; |
| 1818 | case PR_SPEC_FORCE_DISABLE: |
| 1819 | task_set_spec_ssb_disable(task); |
| 1820 | task_set_spec_ssb_force_disable(task); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1821 | task_clear_spec_ssb_noexec(task); |
| 1822 | task_update_spec_tif(task); |
| 1823 | break; |
| 1824 | case PR_SPEC_DISABLE_NOEXEC: |
| 1825 | if (task_spec_ssb_force_disable(task)) |
| 1826 | return -EPERM; |
| 1827 | task_set_spec_ssb_disable(task); |
| 1828 | task_set_spec_ssb_noexec(task); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1829 | task_update_spec_tif(task); |
| 1830 | break; |
| 1831 | default: |
| 1832 | return -ERANGE; |
| 1833 | } |
| 1834 | return 0; |
| 1835 | } |
| 1836 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1837 | static bool is_spec_ib_user_controlled(void) |
| 1838 | { |
| 1839 | return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || |
| 1840 | spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
| 1841 | spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || |
| 1842 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; |
| 1843 | } |
| 1844 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1845 | static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
| 1846 | { |
| 1847 | switch (ctrl) { |
| 1848 | case PR_SPEC_ENABLE: |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1849 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
| 1850 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1851 | return 0; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1852 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1853 | /* |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1854 | * With strict mode for both IBPB and STIBP, the instruction |
| 1855 | * code paths avoid checking this task flag and instead |
| 1856 | * unconditionally run the instruction. However, STIBP and IBPB |
| 1857 | * are independent and either can be set to conditionally |
| 1858 | * enabled regardless of the mode of the other. |
| 1859 | * |
| 1860 | * If either is set to conditional, allow the task flag to be |
| 1861 | * updated, unless it was force-disabled by a previous prctl |
| 1862 | * call. Currently, this is possible on an AMD CPU which has the |
| 1863 | * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the |
| 1864 | * kernel is booted with 'spectre_v2_user=seccomp', then |
| 1865 | * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and |
| 1866 | * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1867 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1868 | if (!is_spec_ib_user_controlled() || |
| 1869 | task_spec_ib_force_disable(task)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1870 | return -EPERM; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1871 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1872 | task_clear_spec_ib_disable(task); |
| 1873 | task_update_spec_tif(task); |
| 1874 | break; |
| 1875 | case PR_SPEC_DISABLE: |
| 1876 | case PR_SPEC_FORCE_DISABLE: |
| 1877 | /* |
| 1878 | * Indirect branch speculation is always allowed when |
| 1879 | * mitigation is force disabled. |
| 1880 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1881 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
| 1882 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1883 | return -EPERM; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1884 | |
| 1885 | if (!is_spec_ib_user_controlled()) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1886 | return 0; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1887 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1888 | task_set_spec_ib_disable(task); |
| 1889 | if (ctrl == PR_SPEC_FORCE_DISABLE) |
| 1890 | task_set_spec_ib_force_disable(task); |
| 1891 | task_update_spec_tif(task); |
| 1892 | break; |
| 1893 | default: |
| 1894 | return -ERANGE; |
| 1895 | } |
| 1896 | return 0; |
| 1897 | } |
| 1898 | |
| 1899 | int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
| 1900 | unsigned long ctrl) |
| 1901 | { |
| 1902 | switch (which) { |
| 1903 | case PR_SPEC_STORE_BYPASS: |
| 1904 | return ssb_prctl_set(task, ctrl); |
| 1905 | case PR_SPEC_INDIRECT_BRANCH: |
| 1906 | return ib_prctl_set(task, ctrl); |
| 1907 | default: |
| 1908 | return -ENODEV; |
| 1909 | } |
| 1910 | } |
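| | /* |
| |  * Userspace reaches the handlers above through prctl(2). A minimal, |
| |  * illustrative usage sketch (userspace code, not part of this file): |
| |  * |
| |  *	#include <sys/prctl.h> |
| |  *	#include <linux/prctl.h> |
| |  * |
| |  *	// Opt the current task out of Speculative Store Bypass: |
| |  *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, |
| |  *	      PR_SPEC_DISABLE, 0, 0); |
| |  * |
| |  *	// Irrevocably disable indirect branch speculation for this task: |
| |  *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, |
| |  *	      PR_SPEC_FORCE_DISABLE, 0, 0); |
| |  */ |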
| 1911 | |
| 1912 | #ifdef CONFIG_SECCOMP |
| 1913 | void arch_seccomp_spec_mitigate(struct task_struct *task) |
| 1914 | { |
| 1915 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
| 1916 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1917 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
| 1918 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1919 | ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
| 1920 | } |
| 1921 | #endif |
| 1922 | |
| 1923 | static int ssb_prctl_get(struct task_struct *task) |
| 1924 | { |
| 1925 | switch (ssb_mode) { |
| 1926 | case SPEC_STORE_BYPASS_DISABLE: |
| 1927 | return PR_SPEC_DISABLE; |
| 1928 | case SPEC_STORE_BYPASS_SECCOMP: |
| 1929 | case SPEC_STORE_BYPASS_PRCTL: |
| 1930 | if (task_spec_ssb_force_disable(task)) |
| 1931 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1932 | if (task_spec_ssb_noexec(task)) |
| 1933 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1934 | if (task_spec_ssb_disable(task)) |
| 1935 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
| 1936 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
| 1937 | default: |
| 1938 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) |
| 1939 | return PR_SPEC_ENABLE; |
| 1940 | return PR_SPEC_NOT_AFFECTED; |
| 1941 | } |
| 1942 | } |
| 1943 | |
| 1944 | static int ib_prctl_get(struct task_struct *task) |
| 1945 | { |
| 1946 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
| 1947 | return PR_SPEC_NOT_AFFECTED; |
| 1948 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1949 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
| 1950 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1951 | return PR_SPEC_ENABLE; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1952 | else if (is_spec_ib_user_controlled()) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1953 | if (task_spec_ib_force_disable(task)) |
| 1954 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; |
| 1955 | if (task_spec_ib_disable(task)) |
| 1956 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
| 1957 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1958 | } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
| 1959 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
| 1960 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1961 | return PR_SPEC_DISABLE; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 1962 | else |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1963 | return PR_SPEC_NOT_AFFECTED; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1964 | } |
| 1965 | |
| 1966 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
| 1967 | { |
| 1968 | switch (which) { |
| 1969 | case PR_SPEC_STORE_BYPASS: |
| 1970 | return ssb_prctl_get(task); |
| 1971 | case PR_SPEC_INDIRECT_BRANCH: |
| 1972 | return ib_prctl_get(task); |
| 1973 | default: |
| 1974 | return -ENODEV; |
| 1975 | } |
| 1976 | } |
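| | /* |
| |  * The matching query returns the PR_SPEC_* bits assembled by the |
| |  * *_prctl_get() helpers above, e.g. (illustrative): |
| |  * |
| |  *	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, |
| |  *			  0, 0, 0); |
| |  *	if (state & PR_SPEC_PRCTL) |
| |  *		;	// per-task control is available |
| |  */ |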
| 1977 | |
| 1978 | void x86_spec_ctrl_setup_ap(void) |
| 1979 | { |
| 1980 | if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 1981 | write_spec_ctrl_current(x86_spec_ctrl_base, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1982 | |
| 1983 | if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) |
| 1984 | x86_amd_ssb_disable(); |
| 1985 | } |
| 1986 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1987 | bool itlb_multihit_kvm_mitigation; |
| 1988 | EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); |
| 1989 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1990 | #undef pr_fmt |
| 1991 | #define pr_fmt(fmt) "L1TF: " fmt |
| 1992 | |
| 1993 | /* Default mitigation for L1TF-affected CPUs */ |
| 1994 | enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; |
| 1995 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
| 1996 | EXPORT_SYMBOL_GPL(l1tf_mitigation); |
| 1997 | #endif |
| 1998 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
| 1999 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
| 2000 | |
| 2001 | /* |
| 2002 | * These CPUs all support a 44-bit physical address space internally in |
| 2003 | * the cache, but CPUID can report a smaller number of physical address bits. |
| 2004 | * |
| 2005 | * The L1TF mitigation uses the topmost address bit for the inversion of |
| 2006 | * non-present PTEs. When the installed memory reaches into the topmost |
| 2007 | * address bit due to memory holes - which has been observed on machines |
| 2008 | * reporting 36 physical address bits with 32G of RAM installed - the |
| 2009 | * mitigation range check in l1tf_select_mitigation() triggers. |
| 2010 | * This is a false positive because the mitigation is still possible: the |
| 2011 | * cache uses 44 bits internally. Therefore use the cache bits instead |
| 2012 | * of the reported physical bits, and adjust them on the affected |
| 2013 | * machines to 44 if the reported value is smaller. |
| 2014 | */ |
| 2015 | static void override_cache_bits(struct cpuinfo_x86 *c) |
| 2016 | { |
| 2017 | if (c->x86 != 6) |
| 2018 | return; |
| 2019 | |
| 2020 | switch (c->x86_model) { |
| 2021 | case INTEL_FAM6_NEHALEM: |
| 2022 | case INTEL_FAM6_WESTMERE: |
| 2023 | case INTEL_FAM6_SANDYBRIDGE: |
| 2024 | case INTEL_FAM6_IVYBRIDGE: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2025 | case INTEL_FAM6_HASWELL: |
| 2026 | case INTEL_FAM6_HASWELL_L: |
| 2027 | case INTEL_FAM6_HASWELL_G: |
| 2028 | case INTEL_FAM6_BROADWELL: |
| 2029 | case INTEL_FAM6_BROADWELL_G: |
| 2030 | case INTEL_FAM6_SKYLAKE_L: |
| 2031 | case INTEL_FAM6_SKYLAKE: |
| 2032 | case INTEL_FAM6_KABYLAKE_L: |
| 2033 | case INTEL_FAM6_KABYLAKE: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2034 | if (c->x86_cache_bits < 44) |
| 2035 | c->x86_cache_bits = 44; |
| 2036 | break; |
| 2037 | } |
| 2038 | } |
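| | /* |
| |  * Worked example for the range check below: l1tf_pfn_limit() is |
| |  * BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT), so half_pa comes out as |
| |  * 1 << (x86_cache_bits - 1). With 36 reported bits that is 2^35 = 32G, |
| |  * which the machines mentioned above reach through memory holes; after |
| |  * the override to 44 bits the limit becomes 2^43 = 8T and the false |
| |  * positive goes away. |
| |  */ |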
| 2039 | |
| 2040 | static void __init l1tf_select_mitigation(void) |
| 2041 | { |
| 2042 | u64 half_pa; |
| 2043 | |
| 2044 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
| 2045 | return; |
| 2046 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2047 | if (cpu_mitigations_off()) |
| 2048 | l1tf_mitigation = L1TF_MITIGATION_OFF; |
| 2049 | else if (cpu_mitigations_auto_nosmt()) |
| 2050 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; |
| 2051 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2052 | override_cache_bits(&boot_cpu_data); |
| 2053 | |
| 2054 | switch (l1tf_mitigation) { |
| 2055 | case L1TF_MITIGATION_OFF: |
| 2056 | case L1TF_MITIGATION_FLUSH_NOWARN: |
| 2057 | case L1TF_MITIGATION_FLUSH: |
| 2058 | break; |
| 2059 | case L1TF_MITIGATION_FLUSH_NOSMT: |
| 2060 | case L1TF_MITIGATION_FULL: |
| 2061 | cpu_smt_disable(false); |
| 2062 | break; |
| 2063 | case L1TF_MITIGATION_FULL_FORCE: |
| 2064 | cpu_smt_disable(true); |
| 2065 | break; |
| 2066 | } |
| 2067 | |
| 2068 | #if CONFIG_PGTABLE_LEVELS == 2 |
| 2069 | pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); |
| 2070 | return; |
| 2071 | #endif |
| 2072 | |
| 2073 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2074 | if (l1tf_mitigation != L1TF_MITIGATION_OFF && |
| 2075 | e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2076 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
| 2077 | pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", |
| 2078 | half_pa); |
| 2079 | pr_info("However, doing so will make a part of your RAM unusable.\n"); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2080 | pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2081 | return; |
| 2082 | } |
| 2083 | |
| 2084 | setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); |
| 2085 | } |
| 2086 | |
| 2087 | static int __init l1tf_cmdline(char *str) |
| 2088 | { |
| 2089 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
| 2090 | return 0; |
| 2091 | |
| 2092 | if (!str) |
| 2093 | return -EINVAL; |
| 2094 | |
| 2095 | if (!strcmp(str, "off")) |
| 2096 | l1tf_mitigation = L1TF_MITIGATION_OFF; |
| 2097 | else if (!strcmp(str, "flush,nowarn")) |
| 2098 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; |
| 2099 | else if (!strcmp(str, "flush")) |
| 2100 | l1tf_mitigation = L1TF_MITIGATION_FLUSH; |
| 2101 | else if (!strcmp(str, "flush,nosmt")) |
| 2102 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; |
| 2103 | else if (!strcmp(str, "full")) |
| 2104 | l1tf_mitigation = L1TF_MITIGATION_FULL; |
| 2105 | else if (!strcmp(str, "full,force")) |
| 2106 | l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; |
| 2107 | |
| 2108 | return 0; |
| 2109 | } |
| 2110 | early_param("l1tf", l1tf_cmdline); |
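| | /* |
| |  * Example uses of the parameter parsed above, on the kernel command |
| |  * line: |
| |  * |
| |  *	l1tf=flush,nosmt	// flush L1D and disable SMT |
| |  *	l1tf=full,force		// full mitigation, SMT forced off |
| |  */ |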
| 2111 | |
| 2112 | #undef pr_fmt |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2113 | #define pr_fmt(fmt) fmt |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2114 | |
| 2115 | #ifdef CONFIG_SYSFS |
| 2116 | |
| 2117 | #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
| 2118 | |
| 2119 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
| 2120 | static const char * const l1tf_vmx_states[] = { |
| 2121 | [VMENTER_L1D_FLUSH_AUTO] = "auto", |
| 2122 | [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", |
| 2123 | [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", |
| 2124 | [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", |
| 2125 | [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", |
| 2126 | [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" |
| 2127 | }; |
| 2128 | |
| 2129 | static ssize_t l1tf_show_state(char *buf) |
| 2130 | { |
| 2131 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) |
| 2132 | return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
| 2133 | |
| 2134 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || |
| 2135 | (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && |
| 2136 | sched_smt_active())) { |
| 2137 | return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, |
| 2138 | l1tf_vmx_states[l1tf_vmx_mitigation]); |
| 2139 | } |
| 2140 | |
| 2141 | return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, |
| 2142 | l1tf_vmx_states[l1tf_vmx_mitigation], |
| 2143 | sched_smt_active() ? "vulnerable" : "disabled"); |
| 2144 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2145 | |
| 2146 | static ssize_t itlb_multihit_show_state(char *buf) |
| 2147 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 2148 | if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || |
| 2149 | !boot_cpu_has(X86_FEATURE_VMX)) |
| 2150 | return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); |
| 2151 | else if (!(cr4_read_shadow() & X86_CR4_VMXE)) |
| 2152 | return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); |
| 2153 | else if (itlb_multihit_kvm_mitigation) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2154 | return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); |
| 2155 | else |
| 2156 | return sprintf(buf, "KVM: Vulnerable\n"); |
| 2157 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2158 | #else |
| 2159 | static ssize_t l1tf_show_state(char *buf) |
| 2160 | { |
| 2161 | return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); |
| 2162 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2163 | |
| 2164 | static ssize_t itlb_multihit_show_state(char *buf) |
| 2165 | { |
| 2166 | return sprintf(buf, "Processor vulnerable\n"); |
| 2167 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2168 | #endif |
| 2169 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2170 | static ssize_t mds_show_state(char *buf) |
| 2171 | { |
| 2172 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
| 2173 | return sprintf(buf, "%s; SMT Host state unknown\n", |
| 2174 | mds_strings[mds_mitigation]); |
| 2175 | } |
| 2176 | |
| 2177 | if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { |
| 2178 | return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], |
| 2179 | (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : |
| 2180 | sched_smt_active() ? "mitigated" : "disabled")); |
| 2181 | } |
| 2182 | |
| 2183 | return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], |
| 2184 | sched_smt_active() ? "vulnerable" : "disabled"); |
| 2185 | } |
| 2186 | |
| 2187 | static ssize_t tsx_async_abort_show_state(char *buf) |
| 2188 | { |
| 2189 | if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || |
| 2190 | (taa_mitigation == TAA_MITIGATION_OFF)) |
| 2191 | return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); |
| 2192 | |
| 2193 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
| 2194 | return sprintf(buf, "%s; SMT Host state unknown\n", |
| 2195 | taa_strings[taa_mitigation]); |
| 2196 | } |
| 2197 | |
| 2198 | return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], |
| 2199 | sched_smt_active() ? "vulnerable" : "disabled"); |
| 2200 | } |
| 2201 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2202 | static ssize_t mmio_stale_data_show_state(char *buf) |
| 2203 | { |
| 2204 | if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
| 2205 | return sysfs_emit(buf, "Unknown: No mitigations\n"); |
| 2206 | |
| 2207 | if (mmio_mitigation == MMIO_MITIGATION_OFF) |
| 2208 | return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); |
| 2209 | |
| 2210 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
| 2211 | return sysfs_emit(buf, "%s; SMT Host state unknown\n", |
| 2212 | mmio_strings[mmio_mitigation]); |
| 2213 | } |
| 2214 | |
| 2215 | return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], |
| 2216 | sched_smt_active() ? "vulnerable" : "disabled"); |
| 2217 | } |
| 2218 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2219 | static char *stibp_state(void) |
| 2220 | { |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2221 | if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2222 | return ""; |
| 2223 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 2224 | switch (spectre_v2_user_stibp) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2225 | case SPECTRE_V2_USER_NONE: |
| 2226 | return ", STIBP: disabled"; |
| 2227 | case SPECTRE_V2_USER_STRICT: |
| 2228 | return ", STIBP: forced"; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2229 | case SPECTRE_V2_USER_STRICT_PREFERRED: |
| 2230 | return ", STIBP: always-on"; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2231 | case SPECTRE_V2_USER_PRCTL: |
| 2232 | case SPECTRE_V2_USER_SECCOMP: |
| 2233 | if (static_key_enabled(&switch_to_cond_stibp)) |
| 2234 | return ", STIBP: conditional"; |
| 2235 | } |
| 2236 | return ""; |
| 2237 | } |
| 2238 | |
| 2239 | static char *ibpb_state(void) |
| 2240 | { |
| 2241 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
| 2242 | if (static_key_enabled(&switch_mm_always_ibpb)) |
| 2243 | return ", IBPB: always-on"; |
| 2244 | if (static_key_enabled(&switch_mm_cond_ibpb)) |
| 2245 | return ", IBPB: conditional"; |
| 2246 | return ", IBPB: disabled"; |
| 2247 | } |
| 2248 | return ""; |
| 2249 | } |
| 2250 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2251 | static char *pbrsb_eibrs_state(void) |
| 2252 | { |
| 2253 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
| 2254 | if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || |
| 2255 | boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) |
| 2256 | return ", PBRSB-eIBRS: SW sequence"; |
| 2257 | else |
| 2258 | return ", PBRSB-eIBRS: Vulnerable"; |
| 2259 | } else { |
| 2260 | return ", PBRSB-eIBRS: Not affected"; |
| 2261 | } |
| 2262 | } |
| 2263 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 2264 | static ssize_t spectre_v2_show_state(char *buf) |
| 2265 | { |
| 2266 | if (spectre_v2_enabled == SPECTRE_V2_LFENCE) |
| 2267 | return sprintf(buf, "Vulnerable: LFENCE\n"); |
| 2268 | |
| 2269 | if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
| 2270 | return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); |
| 2271 | |
| 2272 | if (sched_smt_active() && unprivileged_ebpf_enabled() && |
| 2273 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) |
| 2274 | return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); |
| 2275 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2276 | return sprintf(buf, "%s%s%s%s%s%s%s\n", |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 2277 | spectre_v2_strings[spectre_v2_enabled], |
| 2278 | ibpb_state(), |
| 2279 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", |
| 2280 | stibp_state(), |
| 2281 | boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2282 | pbrsb_eibrs_state(), |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 2283 | spectre_v2_module_string()); |
| 2284 | } |
| 2285 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 2286 | static ssize_t srbds_show_state(char *buf) |
| 2287 | { |
| 2288 | return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); |
| 2289 | } |
| 2290 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2291 | static ssize_t retbleed_show_state(char *buf) |
| 2292 | { |
| 2293 | if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || |
| 2294 | retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { |
| 2295 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
| 2296 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
| 2297 | return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); |
| 2298 | |
| 2299 | return sprintf(buf, "%s; SMT %s\n", |
| 2300 | retbleed_strings[retbleed_mitigation], |
| 2301 | !sched_smt_active() ? "disabled" : |
| 2302 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
| 2303 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? |
| 2304 | "enabled with STIBP protection" : "vulnerable"); |
| 2305 | } |
| 2306 | |
| 2307 | return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); |
| 2308 | } |
| 2309 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2310 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
| 2311 | char *buf, unsigned int bug) |
| 2312 | { |
| 2313 | if (!boot_cpu_has_bug(bug)) |
| 2314 | return sprintf(buf, "Not affected\n"); |
| 2315 | |
| 2316 | switch (bug) { |
| 2317 | case X86_BUG_CPU_MELTDOWN: |
| 2318 | if (boot_cpu_has(X86_FEATURE_PTI)) |
| 2319 | return sprintf(buf, "Mitigation: PTI\n"); |
| 2320 | |
| 2321 | if (hypervisor_is_type(X86_HYPER_XEN_PV)) |
| 2322 | return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); |
| 2323 | |
| 2324 | break; |
| 2325 | |
| 2326 | case X86_BUG_SPECTRE_V1: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2327 | return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2328 | |
| 2329 | case X86_BUG_SPECTRE_V2: |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 2330 | return spectre_v2_show_state(buf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2331 | |
| 2332 | case X86_BUG_SPEC_STORE_BYPASS: |
| 2333 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); |
| 2334 | |
| 2335 | case X86_BUG_L1TF: |
| 2336 | if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) |
| 2337 | return l1tf_show_state(buf); |
| 2338 | break; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2339 | |
| 2340 | case X86_BUG_MDS: |
| 2341 | return mds_show_state(buf); |
| 2342 | |
| 2343 | case X86_BUG_TAA: |
| 2344 | return tsx_async_abort_show_state(buf); |
| 2345 | |
| 2346 | case X86_BUG_ITLB_MULTIHIT: |
| 2347 | return itlb_multihit_show_state(buf); |
| 2348 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 2349 | case X86_BUG_SRBDS: |
| 2350 | return srbds_show_state(buf); |
| 2351 | |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2352 | case X86_BUG_MMIO_STALE_DATA: |
| 2353 | case X86_BUG_MMIO_UNKNOWN: |
| 2354 | return mmio_stale_data_show_state(buf); |
| 2355 | |
| 2356 | case X86_BUG_RETBLEED: |
| 2357 | return retbleed_show_state(buf); |
| 2358 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2359 | default: |
| 2360 | break; |
| 2361 | } |
| 2362 | |
| 2363 | return sprintf(buf, "Vulnerable\n"); |
| 2364 | } |
| 2365 | |
| 2366 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) |
| 2367 | { |
| 2368 | return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); |
| 2369 | } |
| 2370 | |
| 2371 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) |
| 2372 | { |
| 2373 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); |
| 2374 | } |
| 2375 | |
| 2376 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) |
| 2377 | { |
| 2378 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); |
| 2379 | } |
| 2380 | |
| 2381 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) |
| 2382 | { |
| 2383 | return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); |
| 2384 | } |
| 2385 | |
| 2386 | ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) |
| 2387 | { |
| 2388 | return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); |
| 2389 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2390 | |
| 2391 | ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) |
| 2392 | { |
| 2393 | return cpu_show_common(dev, attr, buf, X86_BUG_MDS); |
| 2394 | } |
| 2395 | |
| 2396 | ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) |
| 2397 | { |
| 2398 | return cpu_show_common(dev, attr, buf, X86_BUG_TAA); |
| 2399 | } |
| 2400 | |
| 2401 | ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) |
| 2402 | { |
| 2403 | return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); |
| 2404 | } |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 2405 | |
| 2406 | ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) |
| 2407 | { |
| 2408 | return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); |
| 2409 | } |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame^] | 2410 | |
| 2411 | ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) |
| 2412 | { |
| 2413 | if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) |
| 2414 | return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); |
| 2415 | else |
| 2416 | return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
| 2417 | } |
| 2418 | |
| 2419 | ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) |
| 2420 | { |
| 2421 | return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); |
| 2422 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2423 | #endif |
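| | /* |
| |  * The cpu_show_*() handlers above back the files under |
| |  * /sys/devices/system/cpu/vulnerabilities/. An illustrative way to dump |
| |  * the reported state of every mitigation from a shell: |
| |  * |
| |  *	grep . /sys/devices/system/cpu/vulnerabilities/* |
| |  */ |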