// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"
u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

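/*
 * Decoding note for auditing the entries below (illustrative, not
 * authoritative documentation): GDT_ENTRY_INIT(flags, base, limit)
 * takes the 16 "flags" bits as <flags byte><access byte>.  E.g. 0xc09b
 * has G=1 (4K granularity) and D/B=1 (32-bit) set, with access byte
 * 0x9b = present, DPL 0, code, execute/read, accessed; 0xa09b sets L=1
 * instead of D/B (64-bit code); 0xc0f3 is a DPL-3 read/write data
 * segment.
 */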
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too.
	 * IRET will check the segment types -- kkeil 2000/10/28.
	 * Also sysret mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t" /* save EFLAGS		*/
		      "pushfl		\n\t" /* ... and a scratch copy	*/
		      "popl %0		\n\t" /* pop it into f1		*/
		      "movl %0, %1	\n\t" /* keep the original in f2 */
		      "xorl %2, %0	\n\t" /* toggle the flag bit	*/
		      "pushl %0		\n\t"
		      "popfl		\n\t" /* write modified EFLAGS	*/
		      "pushfl		\n\t"
		      "popl %0		\n\t" /* read EFLAGS back	*/
		      "popfl		\n\t" /* restore original EFLAGS */

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

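/*
 * Pentium III parts expose a processor serial number (PSN) via CPUID.
 * As the helper below does, setting bit 21 (0x200000) of
 * MSR_IA32_BBL_CR_CTL turns the feature off; the CPUID level can
 * shrink as a side effect, hence the re-read of leaf 0 afterwards.
 */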
static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

void native_write_cr4(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
			bits_missing = ~val & cr4_pinned_bits;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
			  bits_missing);
	}
}
EXPORT_SYMBOL(native_write_cr4);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 |= cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	unsigned long mask;

	mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
	static_key_enable(&cr_pinning.key);
}
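/*
 * Illustration of the pinning above (not a real call site): once
 * setup_cr_pinning() has run, something like
 *
 *	native_write_cr4(__read_cr4() & ~X86_CR4_SMEP);
 *
 * does not leave SMEP disabled -- the write path notices the missing
 * pinned bit, sets it again and emits a one-time warning, turning a
 * silent CR4 attack into a loud one.
 */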

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	struct pkru_state *pk;

	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
	if (pk)
		pk->pkru = init_pkru_value;
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set. We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit. All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want. It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software. Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,	0x00000005 },
	{ X86_FEATURE_DCA,	0x00000009 },
	{ X86_FEATURE_XSAVE,	0x0000000d },
	{ 0, 0 }
};

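/*
 * Example of the filtering below: a hypervisor that caps the basic
 * CPUID level at 0xa would leave X86_FEATURE_XSAVE (which needs leaf
 * 0xd) advertised but unusable, so the walk clears it.  A table level
 * that is negative as an s32 would denote an extended leaf
 * (0x80000000+) and be compared against extended_cpuid_level instead.
 */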
static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
__u32 cpu_caps_set[NCAPINTS + NBUGINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

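/*
 * CPUID leaves 0x80000002..0x80000004 return the brand string 16
 * bytes at a time, 48 bytes total.  Vendors pad it with spaces,
 * e.g. "      Intel(R) Xeon(R) CPU ...", so get_model_name() also
 * trims surrounding whitespace in place.
 */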
static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

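/*
 * CPUID leaf 4 sub-leaf 0 (deterministic cache parameters) encodes in
 * EAX[31:26] the maximum core ID per physical package minus one;
 * EAX[4:0] == 0 means the leaf is not implemented, in which case the
 * single-core default set below is kept.
 */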
void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

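/*
 * Worked example for detect_ht(): with CPUID.1:EBX[23:16] reporting 8
 * logical CPUs in a package of 4 cores, the package ID is the APIC ID
 * shifted right by get_count_order(8) = 3 bits, siblings-per-core
 * becomes 8 / 4 = 2, and the core ID is the get_count_order(4) = 2
 * bits above the remaining sibling bit.
 */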
void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

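/*
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order
 * ("Genu", "ineI", "ntel"), which is why cpu_detect() stores EBX at
 * offset 0, EDX at offset 4 and ECX at offset 8 of x86_vendor_id.
 */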
void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

static void init_cqm(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid  = -1;
		c->x86_cache_occ_scale = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid  = ecx;
		c->x86_cache_occ_scale = ebx;
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);
	init_cqm(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

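/*
 * CPUID leaf 0x80000008 reports physical address bits in EAX[7:0] and
 * linear address bits in EAX[15:8]; e.g. EAX = 0x3028 means 48-bit
 * virtual / 40-bit physical.  When the leaf is absent, the defaults
 * set in early_identify_cpu()/identify_cpu() are kept, apart from the
 * 36-bit PAE/PSE36 case below.
 */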
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher.
	 * It's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)

#define VULNWL(_vendor, _family, _model, _whitelist)	\
	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

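/*
 * For reference, the macros above expand to plain x86_cpu_id entries,
 * e.g. VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF) becomes
 * { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, X86_FEATURE_ANY,
 *   NO_MDS | NO_L1TF }, with the whitelist bits carried in
 * driver_data and read back via cpu_matches() below.
 */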
static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM). But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	{}
};

static bool __init cpu_matches(unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);

	return m && !!(m->driver_data & which);
}

u64 x86_read_arch_cap_msr(void)
{
	u64 ia32_cap = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	return ia32_cap;
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
	if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

	if (cpu_matches(NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/*
	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
	 *	- TSX is supported, or
	 *	- TSX_CTRL is present.
	 *
	 * The TSX_CTRL check is needed for cases when TSX could be disabled
	 * before the kernel boot, e.g. by kexec.
	 * The TSX_CTRL check alone is not sufficient for cases when the
	 * microcode update is not present or when running as a guest that
	 * doesn't get TSX_CTRL.
	 */
	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
	    (cpu_has(c, X86_FEATURE_RTM) ||
	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
		setup_force_cpu_bug(X86_BUG_TAA);

	if (cpu_matches(NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, stepping,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled compile-time;
	 *  - it's 32-bit kernel;
	 *  - machine doesn't support 5-level paging;
	 *  - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;

	rdmsrl(MSR_FS_BASE, old_base);	/* save the current FS base */
	wrmsrl(MSR_FS_BASE, 1);		/* plant a known non-zero base */
	loadsegment(fs, 0);		/* write a NULL selector to %fs */
	rdmsrl(MSR_FS_BASE, tmp);	/* did the base survive? */
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	wrmsrl(MSR_FS_BASE, old_base);	/* restore the saved base */
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_PARAVIRT_XXL
	do {
		extern void native_iret(void);
		if (pv_ops.cpu.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
# else
	set_cpu_bug(c, X86_BUG_ESPFIX);
# endif
#endif
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * Validate that ACPI/mptables have the same information about the
 * effective APIC id and update the package map.
 */
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int apicid, cpu = smp_processor_id();

	apicid = apic->cpu_present_to_apicid(cpu);

	if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->initial_apicid);
	}
	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
#else
	c->logical_proc_id = 0;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';		/* Unset */
	c->x86_model_id[0] = '\0';		/* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

| 1515 | /* |
 | 1516 | * Clear/Set all flags overridden by options. This needs to be
 | 1517 | * done before the SMP feature AND across all CPUs below.
| 1518 | */ |
| 1519 | apply_forced_caps(c); |
| 1520 | |
| 1521 | /* |
| 1522 | * On SMP, boot_cpu_data holds the common feature set between |
| 1523 | * all CPUs; so make sure that we indicate which features are |
| 1524 | * common between the CPUs. The first time this routine gets |
| 1525 | * executed, c == &boot_cpu_data. |
| 1526 | */ |
| 1527 | if (c != &boot_cpu_data) { |
| 1528 | /* AND the already accumulated flags with these */ |
| 1529 | for (i = 0; i < NCAPINTS; i++) |
| 1530 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
| 1531 | |
 | 1532 | /* OR, i.e. replicate the boot CPU's bug flags on this CPU */
| 1533 | for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) |
| 1534 | c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; |
| 1535 | } |
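 | | /*
 | |  * Illustrative sketch (comment only): with the loops above, a
 | |  * feature bit survives in boot_cpu_data only if every CPU sets
 | |  * it, while a bug bit seen on the boot CPU is propagated to
 | |  * every AP:
 | |  *
 | |  *   boot caps: 1011, AP caps: 0011  ->  AND  ->  0011
 | |  *   boot bugs: 0100, AP bugs: 0000  ->  OR   ->  0100
 | |  */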
| 1536 | |
| 1537 | /* Init Machine Check Exception if available. */ |
| 1538 | mcheck_cpu_init(c); |
| 1539 | |
| 1540 | select_idle_routine(c); |
| 1541 | |
| 1542 | #ifdef CONFIG_NUMA |
| 1543 | numa_add_cpu(smp_processor_id()); |
| 1544 | #endif |
| 1545 | } |
| 1546 | |
| 1547 | /* |
| 1548 | * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions |
| 1549 | * on 32-bit kernels: |
| 1550 | */ |
| 1551 | #ifdef CONFIG_X86_32 |
| 1552 | void enable_sep_cpu(void) |
| 1553 | { |
| 1554 | struct tss_struct *tss; |
| 1555 | int cpu; |
| 1556 | |
| 1557 | if (!boot_cpu_has(X86_FEATURE_SEP)) |
| 1558 | return; |
| 1559 | |
| 1560 | cpu = get_cpu(); |
| 1561 | tss = &per_cpu(cpu_tss_rw, cpu); |
| 1562 | |
| 1563 | /* |
| 1564 | * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- |
| 1565 | * see the big comment in struct x86_hw_tss's definition. |
| 1566 | */ |
| 1567 | |
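 | | /*
 | |  * SYSENTER loads CS from SYSENTER_CS (deriving SS from it), ESP
 | |  * from SYSENTER_ESP and EIP from SYSENTER_EIP; note that
 | |  * cpu_entry_stack(cpu) + 1 is the top of this CPU's entry stack.
 | |  */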
| 1568 | tss->x86_tss.ss1 = __KERNEL_CS; |
| 1569 | wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); |
| 1570 | wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); |
| 1571 | wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); |
| 1572 | |
| 1573 | put_cpu(); |
| 1574 | } |
| 1575 | #endif |
| 1576 | |
| 1577 | void __init identify_boot_cpu(void) |
| 1578 | { |
| 1579 | identify_cpu(&boot_cpu_data); |
| 1580 | #ifdef CONFIG_X86_32 |
| 1581 | sysenter_setup(); |
| 1582 | enable_sep_cpu(); |
| 1583 | #endif |
| 1584 | cpu_detect_tlb(&boot_cpu_data); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1585 | setup_cr_pinning(); |
| 1586 | |
| 1587 | tsx_init(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1588 | } |
| 1589 | |
| 1590 | void identify_secondary_cpu(struct cpuinfo_x86 *c) |
| 1591 | { |
| 1592 | BUG_ON(c == &boot_cpu_data); |
| 1593 | identify_cpu(c); |
| 1594 | #ifdef CONFIG_X86_32 |
| 1595 | enable_sep_cpu(); |
| 1596 | #endif |
| 1597 | mtrr_ap_init(); |
| 1598 | validate_apic_and_package_id(c); |
| 1599 | x86_spec_ctrl_setup_ap(); |
| 1600 | } |
| 1601 | |
| 1602 | static __init int setup_noclflush(char *arg) |
| 1603 | { |
| 1604 | setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); |
| 1605 | setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); |
| 1606 | return 1; |
| 1607 | } |
| 1608 | __setup("noclflush", setup_noclflush); |
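 | | /*
 | |  * Usage note: booting with "noclflush" masks both CLFLUSH and
 | |  * CLFLUSHOPT, so feature-gated code falls back to the
 | |  * non-CLFLUSH paths.
 | |  */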
| 1609 | |
| 1610 | void print_cpu_info(struct cpuinfo_x86 *c) |
| 1611 | { |
| 1612 | const char *vendor = NULL; |
| 1613 | |
| 1614 | if (c->x86_vendor < X86_VENDOR_NUM) { |
| 1615 | vendor = this_cpu->c_vendor; |
| 1616 | } else { |
| 1617 | if (c->cpuid_level >= 0) |
| 1618 | vendor = c->x86_vendor_id; |
| 1619 | } |
| 1620 | |
| 1621 | if (vendor && !strstr(c->x86_model_id, vendor)) |
| 1622 | pr_cont("%s ", vendor); |
| 1623 | |
| 1624 | if (c->x86_model_id[0]) |
| 1625 | pr_cont("%s", c->x86_model_id); |
| 1626 | else |
| 1627 | pr_cont("%d86", c->x86); |
| 1628 | |
| 1629 | pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); |
| 1630 | |
| 1631 | if (c->x86_stepping || c->cpuid_level >= 0) |
| 1632 | pr_cont(", stepping: 0x%x)\n", c->x86_stepping); |
| 1633 | else |
| 1634 | pr_cont(")\n"); |
| 1635 | } |
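 | | /*
 | |  * Illustrative output (hypothetical model string):
 | |  *   "Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz (family: 0x6,
 | |  *    model: 0x3f, stepping: 0x2)"
 | |  * The vendor prefix is printed only when it is not already part
 | |  * of the model string.
 | |  */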
| 1636 | |
| 1637 | /* |
| 1638 | * clearcpuid= was already parsed in fpu__init_parse_early_param. |
 | 1639 | * But we need to keep a dummy __setup around, otherwise it would
| 1640 | * show up as an environment variable for init. |
| 1641 | */ |
| 1642 | static __init int setup_clearcpuid(char *arg) |
| 1643 | { |
| 1644 | return 1; |
| 1645 | } |
| 1646 | __setup("clearcpuid=", setup_clearcpuid); |
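 | | /*
 | |  * Usage note: "clearcpuid=<feature bit>" is consumed early by
 | |  * fpu__init_parse_early_param(); this stub merely keeps the
 | |  * option out of init's environment.
 | |  */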
| 1647 | |
| 1648 | #ifdef CONFIG_X86_64 |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1649 | DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, |
| 1650 | fixed_percpu_data) __aligned(PAGE_SIZE) __visible; |
| 1651 | EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); |
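 | | /*
 | |  * fixed_percpu_data must be first in the per-CPU area: the
 | |  * x86_64 stack-protector ABI reads the canary at a fixed %gs
 | |  * offset, and gs_base is sized so stack_canary lands exactly
 | |  * there.
 | |  */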
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1652 | |
| 1653 | /* |
| 1654 | * The following percpu variables are hot. Align current_task to |
 | 1655 | * cacheline size so that they all fall into the same cacheline.
| 1656 | */ |
| 1657 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = |
| 1658 | &init_task; |
| 1659 | EXPORT_PER_CPU_SYMBOL(current_task); |
| 1660 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1661 | DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1662 | DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; |
| 1663 | |
| 1664 | DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
| 1665 | EXPORT_PER_CPU_SYMBOL(__preempt_count); |
| 1666 | |
| 1667 | /* May not be marked __init: used by software suspend */ |
| 1668 | void syscall_init(void) |
| 1669 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1670 | wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1671 | wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1672 | |
| 1673 | #ifdef CONFIG_IA32_EMULATION |
| 1674 | wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); |
| 1675 | /* |
| 1676 | * This only works on Intel CPUs. |
 | 1677 | * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
| 1678 | * This does not cause SYSENTER to jump to the wrong location, because |
| 1679 | * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). |
| 1680 | */ |
| 1681 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1682 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, |
| 1683 | (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1684 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); |
| 1685 | #else |
| 1686 | wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); |
| 1687 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); |
| 1688 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); |
| 1689 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); |
| 1690 | #endif |
| 1691 | |
| 1692 | /* Flags to clear on syscall */ |
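 | | /*
 | |  * SYSCALL saves RFLAGS to R11 and then clears every RFLAGS bit
 | |  * set in MSR_SYSCALL_MASK, so the 64-bit entry path starts with
 | |  * interrupts off, DF clear and TF/AC/NT masked.
 | |  */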
| 1693 | wrmsrl(MSR_SYSCALL_MASK, |
| 1694 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| |
| 1695 | X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); |
| 1696 | } |
| 1697 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1698 | DEFINE_PER_CPU(int, debug_stack_usage); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1699 | DEFINE_PER_CPU(u32, debug_idt_ctr); |
| 1700 | |
| 1701 | void debug_stack_set_zero(void) |
| 1702 | { |
| 1703 | this_cpu_inc(debug_idt_ctr); |
| 1704 | load_current_idt(); |
| 1705 | } |
| 1706 | NOKPROBE_SYMBOL(debug_stack_set_zero); |
| 1707 | |
| 1708 | void debug_stack_reset(void) |
| 1709 | { |
| 1710 | if (WARN_ON(!this_cpu_read(debug_idt_ctr))) |
| 1711 | return; |
| 1712 | if (this_cpu_dec_return(debug_idt_ctr) == 0) |
| 1713 | load_current_idt(); |
| 1714 | } |
| 1715 | NOKPROBE_SYMBOL(debug_stack_reset); |
| 1716 | |
| 1717 | #else /* CONFIG_X86_64 */ |
| 1718 | |
| 1719 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
| 1720 | EXPORT_PER_CPU_SYMBOL(current_task); |
| 1721 | DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
| 1722 | EXPORT_PER_CPU_SYMBOL(__preempt_count); |
| 1723 | |
| 1724 | /* |
| 1725 | * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find |
| 1726 | * the top of the kernel stack. Use an extra percpu variable to track the |
| 1727 | * top of the kernel stack directly. |
| 1728 | */ |
| 1729 | DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = |
| 1730 | (unsigned long)&init_thread_union + THREAD_SIZE; |
| 1731 | EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); |
| 1732 | |
| 1733 | #ifdef CONFIG_STACKPROTECTOR |
| 1734 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
| 1735 | #endif |
| 1736 | |
| 1737 | #endif /* CONFIG_X86_64 */ |
| 1738 | |
| 1739 | /* |
| 1740 | * Clear all 6 debug registers: |
| 1741 | */ |
| 1742 | static void clear_all_debug_regs(void) |
| 1743 | { |
| 1744 | int i; |
| 1745 | |
| 1746 | for (i = 0; i < 8; i++) { |
 | 1747 | /* Skip DR4/DR5: depending on CR4.DE they alias DR6/DR7 or fault */
| 1748 | if ((i == 4) || (i == 5)) |
| 1749 | continue; |
| 1750 | |
| 1751 | set_debugreg(0, i); |
| 1752 | } |
| 1753 | } |
| 1754 | |
| 1755 | #ifdef CONFIG_KGDB |
| 1756 | /* |
 | 1757 | * Restore the debug registers if kgdbwait was used and a kernel
 | 1758 | * debugger connection is established.
| 1759 | */ |
| 1760 | static void dbg_restore_debug_regs(void) |
| 1761 | { |
| 1762 | if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) |
| 1763 | arch_kgdb_ops.correct_hw_break(); |
| 1764 | } |
| 1765 | #else /* ! CONFIG_KGDB */ |
| 1766 | #define dbg_restore_debug_regs() |
| 1767 | #endif /* ! CONFIG_KGDB */ |
| 1768 | |
| 1769 | static void wait_for_master_cpu(int cpu) |
| 1770 | { |
| 1771 | #ifdef CONFIG_SMP |
| 1772 | /* |
 | 1773 | * Wait for an ACK from the master CPU before continuing
 | 1774 | * with AP initialization.
| 1775 | */ |
| 1776 | WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); |
| 1777 | while (!cpumask_test_cpu(cpu, cpu_callout_mask)) |
| 1778 | cpu_relax(); |
| 1779 | #endif |
| 1780 | } |
| 1781 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1782 | #ifdef CONFIG_X86_64 |
| 1783 | static void setup_getcpu(int cpu) |
| 1784 | { |
| 1785 | unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); |
| 1786 | struct desc_struct d = { }; |
| 1787 | |
| 1788 | if (boot_cpu_has(X86_FEATURE_RDTSCP)) |
| 1789 | write_rdtscp_aux(cpudata); |
| 1790 | |
| 1791 | /* Store CPU and node number in limit. */ |
| 1792 | d.limit0 = cpudata; |
| 1793 | d.limit1 = cpudata >> 16; |
| 1794 | |
| 1795 | d.type = 5; /* RO data, expand down, accessed */ |
| 1796 | d.dpl = 3; /* Visible to user code */ |
| 1797 | d.s = 1; /* Not a system segment */ |
| 1798 | d.p = 1; /* Present */ |
| 1799 | d.d = 1; /* 32-bit */ |
| 1800 | |
| 1801 | write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); |
| 1802 | } |
| 1803 | #endif |
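 | | /*
 | |  * Userspace sketch (illustrative only, not part of this file):
 | |  * the vDSO recovers the values with RDPID, or with an LSL on the
 | |  * CPUNODE segment:
 | |  *
 | |  *   unsigned long p;
 | |  *   asm("lsl %1, %0" : "=r" (p) : "r" (GDT_ENTRY_CPUNODE*8 + 3));
 | |  *   cpu  = p & VDSO_CPUNODE_MASK;   // low 12 bits
 | |  *   node = p >> VDSO_CPUNODE_BITS;  // node in the high bits
 | |  */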
| 1804 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1805 | /* |
| 1806 | * cpu_init() initializes state that is per-CPU. Some data is already |
| 1807 | * initialized (naturally) in the bootstrap process, such as the GDT |
 | 1808 | * and IDT. We reload them nevertheless; this function acts as a
 | 1809 | * 'CPU state barrier': nothing should get across.
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1810 | */ |
| 1811 | #ifdef CONFIG_X86_64 |
| 1812 | |
| 1813 | void cpu_init(void) |
| 1814 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1815 | int cpu = raw_smp_processor_id(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1816 | struct task_struct *me; |
| 1817 | struct tss_struct *t; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1818 | int i; |
| 1819 | |
| 1820 | wait_for_master_cpu(cpu); |
| 1821 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1822 | if (cpu) |
| 1823 | load_ucode_ap(); |
| 1824 | |
| 1825 | t = &per_cpu(cpu_tss_rw, cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1826 | |
| 1827 | #ifdef CONFIG_NUMA |
| 1828 | if (this_cpu_read(numa_node) == 0 && |
| 1829 | early_cpu_to_node(cpu) != NUMA_NO_NODE) |
| 1830 | set_numa_node(early_cpu_to_node(cpu)); |
| 1831 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1832 | setup_getcpu(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1833 | |
| 1834 | me = current; |
| 1835 | |
| 1836 | pr_debug("Initializing CPU#%d\n", cpu); |
| 1837 | |
| 1838 | cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| 1839 | |
| 1840 | /* |
| 1841 | * Initialize the per-CPU GDT with the boot GDT, |
| 1842 | * and set up the GDT descriptor: |
| 1843 | */ |
| 1844 | |
| 1845 | switch_to_new_gdt(cpu); |
| 1846 | loadsegment(fs, 0); |
| 1847 | |
| 1848 | load_current_idt(); |
| 1849 | |
| 1850 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); |
| 1851 | syscall_init(); |
| 1852 | |
| 1853 | wrmsrl(MSR_FS_BASE, 0); |
| 1854 | wrmsrl(MSR_KERNEL_GS_BASE, 0); |
| 1855 | barrier(); |
| 1856 | |
| 1857 | x86_configure_nx(); |
| 1858 | x2apic_setup(); |
| 1859 | |
| 1860 | /* |
 | 1861 | * Set up and load the per-CPU TSS.
| 1862 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1863 | if (!t->x86_tss.ist[0]) { |
| 1864 | t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); |
| 1865 | t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); |
| 1866 | t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); |
| 1867 | t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1868 | } |
| 1869 | |
| 1870 | t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
| 1871 | |
| 1872 | /* |
| 1873 | * <= is required because the CPU will access up to |
| 1874 | * 8 bits beyond the end of the IO permission bitmap. |
| 1875 | */ |
| 1876 | for (i = 0; i <= IO_BITMAP_LONGS; i++) |
| 1877 | t->io_bitmap[i] = ~0UL; |
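 | | /* All bits set: any user-mode I/O port access faults until
 | |  * ioperm()/iopl() explicitly grants access. */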
| 1878 | |
| 1879 | mmgrab(&init_mm); |
| 1880 | me->active_mm = &init_mm; |
| 1881 | BUG_ON(me->mm); |
| 1882 | initialize_tlbstate_and_flush(); |
| 1883 | enter_lazy_tlb(&init_mm, me); |
| 1884 | |
| 1885 | /* |
| 1886 | * Initialize the TSS. sp0 points to the entry trampoline stack |
| 1887 | * regardless of what task is running. |
| 1888 | */ |
| 1889 | set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); |
| 1890 | load_TR_desc(); |
| 1891 | load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); |
| 1892 | |
| 1893 | load_mm_ldt(&init_mm); |
| 1894 | |
| 1895 | clear_all_debug_regs(); |
| 1896 | dbg_restore_debug_regs(); |
| 1897 | |
| 1898 | fpu__init_cpu(); |
| 1899 | |
| 1900 | if (is_uv_system()) |
| 1901 | uv_cpu_init(); |
| 1902 | |
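 | | /* Switch GDTR to the read-only fixmap alias of this CPU's GDT. */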
| 1903 | load_fixmap_gdt(cpu); |
| 1904 | } |
| 1905 | |
| 1906 | #else |
| 1907 | |
| 1908 | void cpu_init(void) |
| 1909 | { |
| 1910 | int cpu = smp_processor_id(); |
| 1911 | struct task_struct *curr = current; |
| 1912 | struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); |
| 1913 | |
| 1914 | wait_for_master_cpu(cpu); |
| 1915 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1916 | show_ucode_info_early(); |
| 1917 | |
| 1918 | pr_info("Initializing CPU#%d\n", cpu); |
| 1919 | |
| 1920 | if (cpu_feature_enabled(X86_FEATURE_VME) || |
| 1921 | boot_cpu_has(X86_FEATURE_TSC) || |
| 1922 | boot_cpu_has(X86_FEATURE_DE)) |
| 1923 | cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| 1924 | |
| 1925 | load_current_idt(); |
| 1926 | switch_to_new_gdt(cpu); |
| 1927 | |
| 1928 | /* |
| 1929 | * Set up and load the per-CPU TSS and LDT |
| 1930 | */ |
| 1931 | mmgrab(&init_mm); |
| 1932 | curr->active_mm = &init_mm; |
| 1933 | BUG_ON(curr->mm); |
| 1934 | initialize_tlbstate_and_flush(); |
| 1935 | enter_lazy_tlb(&init_mm, curr); |
| 1936 | |
| 1937 | /* |
| 1938 | * Initialize the TSS. sp0 points to the entry trampoline stack |
| 1939 | * regardless of what task is running. |
| 1940 | */ |
| 1941 | set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); |
| 1942 | load_TR_desc(); |
| 1943 | load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); |
| 1944 | |
| 1945 | load_mm_ldt(&init_mm); |
| 1946 | |
| 1947 | t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
| 1948 | |
| 1949 | #ifdef CONFIG_DOUBLEFAULT |
| 1950 | /* Set up doublefault TSS pointer in the GDT */ |
| 1951 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
| 1952 | #endif |
| 1953 | |
| 1954 | clear_all_debug_regs(); |
| 1955 | dbg_restore_debug_regs(); |
| 1956 | |
| 1957 | fpu__init_cpu(); |
| 1958 | |
| 1959 | load_fixmap_gdt(cpu); |
| 1960 | } |
| 1961 | #endif |
| 1962 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1963 | /* |
 | 1964 | * The microcode loader calls this upon a late microcode load to recheck
 | 1965 | * features, but only when the microcode has actually been updated. The
 | 1966 | * caller holds microcode_mutex and the CPU hotplug lock.
| 1967 | */ |
| 1968 | void microcode_check(void) |
| 1969 | { |
| 1970 | struct cpuinfo_x86 info; |
| 1971 | |
| 1972 | perf_check_microcode(); |
| 1973 | |
| 1974 | /* Reload CPUID max function as it might've changed. */ |
| 1975 | info.cpuid_level = cpuid_eax(0); |
| 1976 | |
| 1977 | /* |
| 1978 | * Copy all capability leafs to pick up the synthetic ones so that |
| 1979 | * memcmp() below doesn't fail on that. The ones coming from CPUID will |
| 1980 | * get overwritten in get_cpu_cap(). |
| 1981 | */ |
| 1982 | memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); |
| 1983 | |
| 1984 | get_cpu_cap(&info); |
| 1985 | |
| 1986 | if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) |
| 1987 | return; |
| 1988 | |
| 1989 | pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); |
| 1990 | pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); |
| 1991 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1992 | |
| 1993 | /* |
| 1994 | * Invoked from core CPU hotplug code after hotplug operations |
| 1995 | */ |
| 1996 | void arch_smt_update(void) |
| 1997 | { |
| 1998 | /* Handle the speculative execution misfeatures */ |
| 1999 | cpu_bugs_smt_update(); |
| 2000 | /* Check whether IPI broadcasting can be enabled */ |
| 2001 | apic_smt_update(); |
| 2002 | } |