// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>

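/*
 * Generic matcher: is the local CPU's MIDR within the entry's affected
 * range, excluding any revisions that REVIDR marks as already fixed?
 */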
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

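/* Index of the last EL2 (hyp) vector slot handed out for hardening. */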
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

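/*
 * Copy the hardening sequence into each of the sixteen 0x80-byte vector
 * entries of the given 2K hyp vector slot, then flush the I-cache so the
 * new vectors are visible to instruction fetch.
 */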
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

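/*
 * Falkor link-stack sanitization: issue sixteen branch-and-link
 * instructions to the next instruction so the return-address predictor
 * is refilled with benign entries, then restore x30.
 */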
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

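/*
 * Per-CPU flag read by the kernel entry code to decide whether
 * ARCH_WORKAROUND_2 calls are needed around kernel entry/exit.
 */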
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

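/*
 * Alternatives callback: patch the single placeholder instruction into an
 * HVC or SMC, matching whichever SMCCC conduit PSCI reported.
 */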
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

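/*
 * Toggle the SSBD mitigation on the local CPU: prefer the SSBS PSTATE bit
 * when the CPU implements it, otherwise fall back to the
 * ARCH_WORKAROUND_2 firmware call over the PSCI conduit.
 */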
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

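/*
 * Capability matcher, run on each CPU: query firmware for
 * ARCH_WORKAROUND_2 support, apply the ssbd= policy, and record whether
 * the system as a whole is still considered SSB-safe.
 */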
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

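/*
 * Convenience initialisers for arm64_cpu_capabilities entries: they fill
 * in the .matches callback and the MIDR range(s) describing the affected
 * CPUs, and are used to build the arm64_errata[] table below.
 */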
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
	{
	}
};

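/*
 * sysfs show handlers backing the generic
 * /sys/devices/system/cpu/vulnerabilities/ files.
 */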
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}