// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>

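/*
 * MIDR-based matchers for errata entries: a CPU is treated as affected when
 * its MIDR falls in the entry's range (or range list), unless a fixed_revs
 * entry shows, via REVIDR bits, that this particular revision already has
 * the erratum fixed.
 */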
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want all CPUs in the system to expose a consistent CTR_EL0 so
	 * that applications behave correctly when migrated between CPUs.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU will
	 *    report IDC = 0 anyway, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

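/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, so the
 * kernel can emulate a value that is safe and consistent system-wide.
 */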
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

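/*
 * CPUs that need the same hardening callback share one 2K EL2 vector slot;
 * a fresh slot is only claimed (from arm64_el2_vector_last_slot) for the
 * first CPU that installs a given callback.
 */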
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

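/*
 * Alternative-instruction callback: the single placeholder instruction is
 * patched to HVC or SMC to match the PSCI conduit, or left untouched when
 * no conduit is available.
 */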
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

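/*
 * Apply or remove the SSB mitigation on the calling CPU. CPUs that
 * implement SSBS toggle PSTATE.SSBS directly (clearing it enables the
 * mitigation); everything else goes through the ARCH_WORKAROUND_2
 * firmware call on whichever conduit PSCI provides.
 */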
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
{
	if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
		cap->matches(cap, SCOPE_LOCAL_CPU);
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

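/*
 * Helpers for building arm64_cpu_capabilities entries: each picks the
 * appropriate .matches callback and encodes the affected MIDR range(s)
 * for an erratum.
 */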
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
	.matches = is_affected_midr_range, \
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model) \
	.matches = is_affected_midr_range, \
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list) \
	.matches = is_affected_midr_range_list, \
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

static void
cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
{
	cap->matches(cap, SCOPE_LOCAL_CPU);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

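/*
 * Erratum 1542419 (kernel portion): only Neoverse N1 cores that report
 * CTR_EL0.DIC are matched here; the corresponding arm64_errata entry then
 * traps EL0 CTR_EL0 access via cpu_enable_trap_ctr_access().
 */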
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Branch predictor hardening",
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
		.cpu_enable = cpu_enable_branch_predictor_hardening,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.cpu_enable = cpu_enable_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
	{
	}
};

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
				"Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}