Diffstat (limited to 'lib/cpus/aarch64/cortex_a76.S')
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S | 657
 1 file changed, 285 insertions(+), 372 deletions(-)
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 2c99cdc926..8b3d7300eb 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,16 +7,19 @@
#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
-#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>
+#include "wa_cve_2022_23960_bhb.S"
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "Cortex-A76 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
+ .globl cortex_a76_reset_func
+ .globl cortex_a76_core_pwr_dwn
+ .globl cortex_a76_disable_wa_cve_2018_3639
/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
@@ -35,59 +38,17 @@
*
* The macro saves x2-x3 to the context. In the fast path
* x0-x3 registers do not need to be restored as the calling
- * context will have saved them.
+ * context will have saved them. The macro also saves
+ * x29-x30 to the context in the sync_exception path.
*/
.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
.if \_is_sync_exception
- /*
- * Ensure SMC is coming from A64/A32 state on #0
- * with W0 = SMCCC_ARCH_WORKAROUND_2
- *
- * This sequence evaluates as:
- * (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
- * allowing use of a single branch operation
- */
- orr w2, wzr, #SMCCC_ARCH_WORKAROUND_2
- cmp x0, x2
- mrs x3, esr_el3
- mov_imm w2, \_esr_el3_val
- ccmp w2, w3, #0, eq
- /*
- * Static predictor will predict a fall-through, optimizing
- * the `SMCCC_ARCH_WORKAROUND_2` fast path.
- */
- bne 1f
-
- /*
- * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
- * fast path.
- */
- cmp x1, xzr /* enable/disable check */
-
- /*
- * When the calling context wants mitigation disabled,
- * we program the mitigation disable function in the
- * CPU context, which gets invoked on subsequent exits from
- * EL3 via the `el3_exit` function. Otherwise NULL is
- * programmed in the CPU context, which results in caller's
- * inheriting the EL3 mitigation state (enabled) on subsequent
- * `el3_exit`.
- */
- mov x0, xzr
- adr x1, cortex_a76_disable_wa_cve_2018_3639
- csel x1, x1, x0, eq
- str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
-
- mrs x2, CORTEX_A76_CPUACTLR2_EL1
- orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
- bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
- csel x3, x3, x1, eq
- msr CORTEX_A76_CPUACTLR2_EL1, x3
- exception_return /* exception_return contains ISB */
+ stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ mov_imm w2, \_esr_el3_val
+ bl apply_cve_2018_3639_sync_wa
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
.endif
-1:
/*
* Always enable v4 mitigation during EL3 execution. This is not
* required for the fast path above because it does not perform any
@@ -105,8 +66,10 @@
*/
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
.endm
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
-vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
+vector_base cortex_a76_wa_cve_vbar
/* ---------------------------------------------------------------------
* Current EL with SP_EL0 : 0x0 - 0x200
@@ -153,22 +116,54 @@ end_vector_entry cortex_a76_serror_sp_elx
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64
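
For reference, `apply_cve_2022_23960_bhb_wa` comes from the included wa_cve_2022_23960_bhb.S and takes the loop count as its only argument. Conceptually, the Spectre-BHB mitigation executes a CPU-specific number of taken branches on entry to EL3 so that branch history trained at lower ELs cannot steer later predictions. A minimal sketch of the idea (not the actual TF-A macro, which also preserves registers in the context and shares a per-loop-count stub):

	/* Sketch only: \count iterations of taken branches, then sync. */
	.macro	bhb_clearing_loop_sketch count
	mov	x0, #\count
1:
	b	2f			/* taken branch overwrites BHB state */
2:
	subs	x0, x0, #1
	b.ne	1b
	dsb	sy
	isb
	.endm
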
vector_entry cortex_a76_irq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b irq_aarch64
end_vector_entry cortex_a76_irq_aarch64
vector_entry cortex_a76_fiq_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64
vector_entry cortex_a76_serror_aarch64
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b serror_aarch64
end_vector_entry cortex_a76_serror_aarch64
@@ -177,284 +172,251 @@ end_vector_entry cortex_a76_serror_aarch64
* ---------------------------------------------------------------------
*/
vector_entry cortex_a76_sync_exception_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32
vector_entry cortex_a76_irq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b irq_aarch32
end_vector_entry cortex_a76_irq_aarch32
vector_entry cortex_a76_fiq_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32
vector_entry cortex_a76_serror_aarch32
+
+#if WORKAROUND_CVE_2022_23960
+ apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
+#endif /* WORKAROUND_CVE_2022_23960 */
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+
b serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
-#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1073348.
- * This applies only to revision <= r1p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * -----------------------------------------------------------------
+ * This function applies the mitigation for CVE-2018-3639
+ * specifically for sync exceptions. It implements a fast path
+ * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
+ * running in AArch64 take the fast path and return early.
+ *
+ * In the fast path the x0-x3 registers do not need to be restored,
+ * as the calling context will have saved them.
+ *
+ * The caller must pass the expected esr_el3 value to compare
+ * against in w2, and must save x29-x30 to the context before the
+ * call and restore them after it, before jumping to the main
+ * runtime vector table entry.
+ *
+ * Shall clobber: x0-x3, x30
+ * -----------------------------------------------------------------
*/
-func errata_a76_1073348_wa
+func apply_cve_2018_3639_sync_wa
/*
- * Compare x0 against revision r1p0
+ * Ensure SMC is coming from A64/A32 state on #0
+ * with W0 = SMCCC_ARCH_WORKAROUND_2
+ *
+ * This sequence evaluates as:
+ * (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+ * allowing use of a single branch operation
+ * X2 is populated outside this function with the expected esr_el3 value.
*/
- mov x17, x30
- bl check_errata_1073348
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUACTLR_EL1
- orr x1, x1 ,#CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
- msr CORTEX_A76_CPUACTLR_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_a76_1073348_wa
+ orr w3, wzr, #SMCCC_ARCH_WORKAROUND_2
+ cmp x0, x3
+ mrs x3, esr_el3
-func check_errata_1073348
- mov x1, #0x10
- b cpu_rev_var_ls
-endfunc check_errata_1073348
-
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1130799.
- * This applies only to revision <= r2p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_a76_1130799_wa
+ ccmp w2, w3, #0, eq
/*
- * Compare x0 against revision r2p0
+ * Static predictor will predict a fall-through, optimizing
+ * the `SMCCC_ARCH_WORKAROUND_2` fast path.
*/
- mov x17, x30
- bl check_errata_1130799
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUACTLR2_EL1
- orr x1, x1 ,#(1 << 59)
- msr CORTEX_A76_CPUACTLR2_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_a76_1130799_wa
+ bne 1f
-func check_errata_1130799
- mov x1, #0x20
- b cpu_rev_var_ls
-endfunc check_errata_1130799
-
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1220197.
- * This applies only to revision <= r2p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_a76_1220197_wa
-/*
- * Compare x0 against revision r2p0
- */
- mov x17, x30
- bl check_errata_1220197
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUECTLR_EL1
- orr x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
- msr CORTEX_A76_CPUECTLR_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_a76_1220197_wa
+ /*
+ * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+ * fast path.
+ */
+ cmp x1, xzr /* enable/disable check */
-func check_errata_1220197
- mov x1, #0x20
- b cpu_rev_var_ls
-endfunc check_errata_1220197
-
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1257314.
- * This applies only to revision <= r3p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_a76_1257314_wa
/*
- * Compare x0 against revision r3p0
+ * When the calling context wants mitigation disabled,
+ * we program the mitigation disable function in the
+ * CPU context, which gets invoked on subsequent exits from
+ * EL3 via the `el3_exit` function. Otherwise NULL is
+ * programmed in the CPU context, which results in caller's
+ * inheriting the EL3 mitigation state (enabled) on subsequent
+ * `el3_exit`.
*/
- mov x17, x30
- bl check_errata_1257314
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUACTLR3_EL1
- orr x1, x1, CORTEX_A76_CPUACTLR3_EL1_BIT_10
- msr CORTEX_A76_CPUACTLR3_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_a76_1257314_wa
+ mov x0, xzr
+ adr x1, cortex_a76_disable_wa_cve_2018_3639
+ csel x1, x1, x0, eq
+ str x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
-func check_errata_1257314
- mov x1, #0x30
- b cpu_rev_var_ls
-endfunc check_errata_1257314
-
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1262888.
- * This applies only to revision <= r3p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_a76_1262888_wa
+ mrs x2, CORTEX_A76_CPUACTLR2_EL1
+ orr x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ bic x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+ csel x3, x3, x1, eq
+ msr CORTEX_A76_CPUACTLR2_EL1, x3
+ ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
/*
- * Compare x0 against revision r3p0
- */
- mov x17, x30
- bl check_errata_1262888
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUECTLR_EL1
- orr x1, x1, CORTEX_A76_CPUECTLR_EL1_BIT_51
- msr CORTEX_A76_CPUECTLR_EL1, x1
- isb
+ * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
+ */
+ exception_return /* exception_return contains ISB */
1:
- ret x17
-endfunc errata_a76_1262888_wa
-
-func check_errata_1262888
- mov x1, #0x30
- b cpu_rev_var_ls
-endfunc check_errata_1262888
-
- /* ---------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1286807.
- * This applies only to revision <= r3p0 of Cortex A76.
- * Due to the nature of the errata it is applied unconditionally
- * when built in, report it as applicable in this case
- * ---------------------------------------------------
- */
-func check_errata_1286807
-#if ERRATA_A76_1286807
- mov x0, #ERRATA_APPLIES
ret
-#else
- mov x1, #0x30
- b cpu_rev_var_ls
-#endif
-endfunc check_errata_1286807
-
- /* --------------------------------------------------
- * Errata workaround for Cortex A76 Errata #1791580.
- * This applies to revisions <= r4p0 of Cortex A76.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
-func errata_a76_1791580_wa
- /* Compare x0 against revision r4p0 */
- mov x17, x30
- bl check_errata_1791580
- cbz x0, 1f
- mrs x1, CORTEX_A76_CPUACTLR2_EL1
- orr x1, x1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+endfunc apply_cve_2018_3639_sync_wa
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
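
A note on the `cmp`/`ccmp` pairing in the function above: `ccmp w2, w3, #0, eq` compares w2 with w3 only when the preceding `cmp x0, x3` produced EQ; otherwise it loads NZCV with #0, which leaves the NE condition true, so a single `bne 1f` rejects both mismatch cases. A two-branch equivalent, for illustration only:

	cmp	x0, x3		/* x0 == SMCCC_ARCH_WORKAROUND_2 ? */
	b.ne	1f		/* no: slow path */
	mrs	x3, esr_el3
	cmp	w2, w3		/* expected SMC #0 ESR == actual ESR ? */
	b.ne	1f		/* no: slow path */
	/* fall through: SMCCC_ARCH_WORKAROUND_2 fast path */
1:
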
+
+workaround_reset_start cortex_a76, ERRATUM(1073348), ERRATA_A76_1073348
+	sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
+workaround_reset_end cortex_a76, ERRATUM(1073348)
+
+check_erratum_ls cortex_a76, ERRATUM(1073348), CPU_REV(1, 0)
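
Each converted erratum now follows this declarative pattern: `workaround_reset_start`/`workaround_reset_end` bracket the workaround body and register it with the errata framework, while `check_erratum_ls` generates the revision "less-or-equal" check that used to be a hand-written `check_errata_*` function. `CPU_REV(1, 0)` encodes r1p0 the same way as the old immediate (variant in bits [7:4], revision in bits [3:0], i.e. 0x10, matching the deleted `mov x1, #0x10`). A hypothetical new erratum would be declared the same way (made-up erratum number and build flag; the macros are the ones used throughout this file):

	workaround_reset_start cortex_a76, ERRATUM(9999999), ERRATA_A76_9999999
		sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
	workaround_reset_end cortex_a76, ERRATUM(9999999)

	check_erratum_ls cortex_a76, ERRATUM(9999999), CPU_REV(2, 1)	/* applies to <= r2p1 */
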
+
+workaround_reset_start cortex_a76, ERRATUM(1130799), ERRATA_A76_1130799
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_BIT_59
-	msr CORTEX_A76_CPUACTLR2_EL1, x1
- isb
-1:
- ret x17
-endfunc errata_a76_1791580_wa
+workaround_reset_end cortex_a76, ERRATUM(1130799)
-func check_errata_1791580
- /* Applies to everything <=r4p0. */
- mov x1, #0x40
- b cpu_rev_var_ls
-endfunc check_errata_1791580
-
- /* --------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1262606,
- * #1275112, and #1868343. #1262606 and #1275112
- * apply to revisions <= r3p0 and #1868343 applies to
- * revisions <= r4p0.
- * Inputs:
- * x0: variant[4:7] and revision[0:3] of current cpu.
- * Shall clobber: x0-x17
- * --------------------------------------------------
- */
+check_erratum_ls cortex_a76, ERRATUM(1130799), CPU_REV(2, 0)
-func errata_a76_1262606_1275112_1868343_wa
- mov x17, x30
+workaround_reset_start cortex_a76, ERRATUM(1220197), ERRATA_A76_1220197
+ sysreg_bit_set CORTEX_A76_CPUECTLR_EL1, CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
+workaround_reset_end cortex_a76, ERRATUM(1220197)
-/* Check for <= r3p0 cases and branch if check passes. */
-#if ERRATA_A76_1262606 || ERRATA_A76_1275112
- bl check_errata_1262606
- cbnz x0, 1f
-#endif
+check_erratum_ls cortex_a76, ERRATUM(1220197), CPU_REV(2, 0)
-/* Check for <= r4p0 cases and branch if check fails. */
-#if ERRATA_A76_1868343
- bl check_errata_1868343
- cbz x0, 2f
-#endif
-1:
- mrs x1, CORTEX_A76_CPUACTLR_EL1
- orr x1, x1, #CORTEX_A76_CPUACTLR_EL1_BIT_13
- msr CORTEX_A76_CPUACTLR_EL1, x1
- isb
-2:
- ret x17
-endfunc errata_a76_1262606_1275112_1868343_wa
+workaround_reset_start cortex_a76, ERRATUM(1257314), ERRATA_A76_1257314
+ sysreg_bit_set CORTEX_A76_CPUACTLR3_EL1, CORTEX_A76_CPUACTLR3_EL1_BIT_10
+workaround_reset_end cortex_a76, ERRATUM(1257314)
-func check_errata_1262606
- mov x1, #0x30
- b cpu_rev_var_ls
-endfunc check_errata_1262606
+check_erratum_ls cortex_a76, ERRATUM(1257314), CPU_REV(3, 0)
-func check_errata_1275112
- mov x1, #0x30
- b cpu_rev_var_ls
-endfunc check_errata_1275112
+workaround_reset_start cortex_a76, ERRATUM(1262606), ERRATA_A76_1262606
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1262606)
-func check_errata_1868343
- mov x1, #0x40
- b cpu_rev_var_ls
-endfunc check_errata_1868343
+check_erratum_ls cortex_a76, ERRATUM(1262606), CPU_REV(3, 0)
-func check_errata_cve_2018_3639
-#if WORKAROUND_CVE_2018_3639
- mov x0, #ERRATA_APPLIES
+workaround_reset_start cortex_a76, ERRATUM(1262888), ERRATA_A76_1262888
+ sysreg_bit_set CORTEX_A76_CPUECTLR_EL1, CORTEX_A76_CPUECTLR_EL1_BIT_51
+workaround_reset_end cortex_a76, ERRATUM(1262888)
+
+check_erratum_ls cortex_a76, ERRATUM(1262888), CPU_REV(3, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1275112), ERRATA_A76_1275112
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1275112)
+
+check_erratum_ls cortex_a76, ERRATUM(1275112), CPU_REV(3, 0)
+
+check_erratum_custom_start cortex_a76, ERRATUM(1286807)
+#if ERRATA_A76_1286807
+ mov x0, #ERRATA_APPLIES
+ ret
#else
- mov x0, #ERRATA_MISSING
+ mov x1, #0x30
+ b cpu_rev_var_ls
#endif
- ret
-endfunc check_errata_cve_2018_3639
+check_erratum_custom_end cortex_a76, ERRATUM(1286807)
+
+workaround_reset_start cortex_a76, ERRATUM(1791580), ERRATA_A76_1791580
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
+workaround_reset_end cortex_a76, ERRATUM(1791580)
+
+check_erratum_ls cortex_a76, ERRATUM(1791580), CPU_REV(4, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1868343), ERRATA_A76_1868343
+ sysreg_bit_set CORTEX_A76_CPUACTLR_EL1, CORTEX_A76_CPUACTLR_EL1_BIT_13
+workaround_reset_end cortex_a76, ERRATUM(1868343)
+
+check_erratum_ls cortex_a76, ERRATUM(1868343), CPU_REV(4, 0)
+
+workaround_reset_start cortex_a76, ERRATUM(1946160), ERRATA_A76_1946160
+ mov x0, #3
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3900002
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #4
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800082
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF00083
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+
+ mov x0, #5
+ msr S3_6_C15_C8_0, x0
+ ldr x0, =0x10E3800200
+ msr S3_6_C15_C8_2, x0
+ ldr x0, =0x10FFF003E0
+ msr S3_6_C15_C8_3, x0
+ ldr x0, =0x2001003FF
+ msr S3_6_C15_C8_1, x0
+workaround_reset_end cortex_a76, ERRATUM(1946160)
+
+check_erratum_range cortex_a76, ERRATUM(1946160), CPU_REV(3, 0), CPU_REV(4, 1)
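
The `S3_6_C15_C8_*` encodings written in the 1946160 workaround above are Cortex-A76 implementation-defined instruction-patching registers, and the values come straight from the erratum notice. Based on how similar errata are annotated for related cores, the mapping is assumed to be (the A76 documentation is authoritative):

	/* S3_6_C15_C8_0 : CPUPSELR_EL3 - selects the patch slot (3/4/5) */
	/* S3_6_C15_C8_1 : CPUPCR_EL3   - patch control/enable           */
	/* S3_6_C15_C8_2 : CPUPOR_EL3   - patch opcode                   */
	/* S3_6_C15_C8_3 : CPUPMR_EL3   - patch mask                     */
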
+
+workaround_runtime_start cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102
+	/* DSB before the ISB of the power-down sequence */
+ dsb sy
+workaround_runtime_end cortex_a76, ERRATUM(2743102)
+
+check_erratum_ls cortex_a76, ERRATUM(2743102), CPU_REV(4, 1)
+
+check_erratum_chosen cortex_a76, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
func cortex_a76_disable_wa_cve_2018_3639
- mrs x0, CORTEX_A76_CPUACTLR2_EL1
- bic x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
- msr CORTEX_A76_CPUACTLR2_EL1, x0
+ sysreg_bit_clear CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
isb
ret
endfunc cortex_a76_disable_wa_cve_2018_3639
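
`sysreg_bit_set` and `sysreg_bit_clear` replace the open-coded read-modify-write sequences deleted throughout this patch. Judging from the three deleted lines just above, each expands to roughly the following, clobbering a scratch register:

	/* Approximate expansion of sysreg_bit_clear, sketch only;
	 * sysreg_bit_set is the same with orr in place of bic. */
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
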
- /* --------------------------------------------------------------
- * Errata Workaround for Cortex A76 Errata #1165522.
- * This applies only to revisions <= r3p0 of Cortex A76.
- * Due to the nature of the errata it is applied unconditionally
- * when built in, report it as applicable in this case
- * --------------------------------------------------------------
- */
-func check_errata_1165522
+/* --------------------------------------------------------------
+ * Errata workaround for Cortex-A76 erratum #1165522.
+ * This applies only to revisions <= r3p0 of Cortex-A76.
+ * Due to the nature of the erratum it is applied unconditionally
+ * when built in; report it as applicable in this case.
+ * --------------------------------------------------------------
+ */
+check_erratum_custom_start cortex_a76, ERRATUM(1165522)
#if ERRATA_A76_1165522
mov x0, #ERRATA_APPLIES
ret
@@ -462,52 +424,32 @@ func check_errata_1165522
mov x1, #0x30
b cpu_rev_var_ls
#endif
-endfunc check_errata_1165522
-
- /* -------------------------------------------------
- * The CPU Ops reset function for Cortex-A76.
- * Shall clobber: x0-x19
- * -------------------------------------------------
- */
-func cortex_a76_reset_func
- mov x19, x30
- bl cpu_get_rev_var
- mov x18, x0
-
-#if ERRATA_A76_1073348
- mov x0, x18
- bl errata_a76_1073348_wa
-#endif
-
-#if ERRATA_A76_1130799
- mov x0, x18
- bl errata_a76_1130799_wa
-#endif
+check_erratum_custom_end cortex_a76, ERRATUM(1165522)
-#if ERRATA_A76_1220197
- mov x0, x18
- bl errata_a76_1220197_wa
-#endif
+check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-#if ERRATA_A76_1257314
- mov x0, x18
- bl errata_a76_1257314_wa
-#endif
+/* The erratum has no workaround in the CPU. Generic code must take care. */
+add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET
-#if ERRATA_A76_1262606 || ERRATA_A76_1275112 || ERRATA_A76_1868343
- mov x0, x18
- bl errata_a76_1262606_1275112_1868343_wa
-#endif
-
-#if ERRATA_A76_1262888
- mov x0, x18
- bl errata_a76_1262888_wa
-#endif
+/* ERRATA_DSU_798953:
+ * This erratum is defined in dsu_helpers.S but applies to cortex_a76
+ * as well. Create symbolic aliases for the existing workaround
+ * functions so that they are registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a76_798953, check_errata_dsu_798953
+.equ erratum_cortex_a76_798953_wa, errata_dsu_798953_wa
+add_erratum_entry cortex_a76, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+
+/* ERRATA_DSU_936184:
+ * This erratum is defined in dsu_helpers.S but applies to cortex_a76
+ * as well. Create symbolic aliases for the existing workaround
+ * functions so that they are registered under the Errata Framework.
+ */
+.equ check_erratum_cortex_a76_936184, check_errata_dsu_936184
+.equ erratum_cortex_a76_936184_wa, errata_dsu_936184_wa
+add_erratum_entry cortex_a76, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
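
The `.equ` aliases work because the framework macros resolve workaround and check functions by naming convention, `erratum_<cpu>_<id>_wa` and `check_erratum_<cpu>_<id>`, as the alias names themselves show. Any helper shared between CPUs can be registered the same way (hypothetical erratum number, flag, and helper symbols):

	.equ	check_erratum_cortex_a76_123456, check_errata_shared_123456
	.equ	erratum_cortex_a76_123456_wa, errata_shared_123456_wa
	add_erratum_entry cortex_a76, ERRATUM(123456), ERRATA_SHARED_123456, APPLY_AT_RESET
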
-#if ERRATA_A76_1791580
- mov x0, x18
- bl errata_a76_1791580_wa
-#endif
+cpu_reset_func_start cortex_a76
#if WORKAROUND_CVE_2018_3639
/* If the PE implements SSBS, we don't need the dynamic workaround */
@@ -520,9 +462,7 @@ func cortex_a76_reset_func
#endif
#if DYNAMIC_WORKAROUND_CVE_2018_3639
cbnz x0, 1f
- mrs x0, CORTEX_A76_CPUACTLR2_EL1
- orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
- msr CORTEX_A76_CPUACTLR2_EL1, x0
+ sysreg_bit_set CORTEX_A76_CPUACTLR2_EL1, CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
isb
#ifdef IMAGE_BL31
@@ -530,26 +470,29 @@ func cortex_a76_reset_func
* The Cortex-A76 generic vectors are overwritten to use the vectors
* defined above. This is required in order to apply mitigation
* against CVE-2018-3639 on exception entry from lower ELs.
+	 * If this vector table is used, skip overriding it again for
+	 * CVE-2022-23960, as both mitigations use the same VBAR.
*/
- adr x0, cortex_a76_wa_cve_2018_3639_a76_vbar
- msr vbar_el3, x0
+ override_vector_table cortex_a76_wa_cve_vbar
isb
+ b 2f
#endif /* IMAGE_BL31 */
1:
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
#endif /* WORKAROUND_CVE_2018_3639 */
-#if ERRATA_DSU_798953
- bl errata_dsu_798953_wa
-#endif
-
-#if ERRATA_DSU_936184
- bl errata_dsu_936184_wa
-#endif
-
- ret x19
-endfunc cortex_a76_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
+ /*
+ * The Cortex-A76 generic vectors are overridden to apply errata
+ * mitigation on exception entry from lower ELs. This will be bypassed
+ * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
+ */
+ override_vector_table cortex_a76_wa_cve_vbar
+ isb
+#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
+2:
+cpu_reset_func_end cortex_a76
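
`override_vector_table` stands in for the VBAR_EL3 programming the old reset function did by hand; the `adr`/`msr` pair it replaces (deleted above) suggests an expansion along the lines of:

	/* Approximate expansion, inferred from the code it replaces. */
	adr	x0, cortex_a76_wa_cve_vbar
	msr	vbar_el3, x0

The `b 2f` after the first override and the `2:` label before `cpu_reset_func_end` ensure the vector table is programmed only once when both mitigations are enabled.
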
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
@@ -560,46 +503,15 @@ func cortex_a76_core_pwr_dwn
* Enable CPU power down bit in power control register
* ---------------------------------------------
*/
- mrs x0, CORTEX_A76_CPUPWRCTLR_EL1
- orr x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
- msr CORTEX_A76_CPUPWRCTLR_EL1, x0
+ sysreg_bit_set CORTEX_A76_CPUPWRCTLR_EL1, CORTEX_A76_CORE_PWRDN_EN_MASK
+
+ apply_erratum cortex_a76, ERRATUM(2743102), ERRATA_A76_2743102
+
isb
ret
endfunc cortex_a76_core_pwr_dwn
-#if REPORT_ERRATA
-/*
- * Errata printing function for Cortex A76. Must follow AAPCS.
- */
-func cortex_a76_errata_report
- stp x8, x30, [sp, #-16]!
-
- bl cpu_get_rev_var
- mov x8, x0
-
- /*
- * Report all errata. The revision-variant information is passed to
- * checking functions of each errata.
- */
- report_errata ERRATA_A76_1073348, cortex_a76, 1073348
- report_errata ERRATA_A76_1130799, cortex_a76, 1130799
- report_errata ERRATA_A76_1220197, cortex_a76, 1220197
- report_errata ERRATA_A76_1257314, cortex_a76, 1257314
- report_errata ERRATA_A76_1262606, cortex_a76, 1262606
- report_errata ERRATA_A76_1262888, cortex_a76, 1262888
- report_errata ERRATA_A76_1275112, cortex_a76, 1275112
- report_errata ERRATA_A76_1286807, cortex_a76, 1286807
- report_errata ERRATA_A76_1791580, cortex_a76, 1791580
- report_errata ERRATA_A76_1165522, cortex_a76, 1165522
- report_errata ERRATA_A76_1868343, cortex_a76, 1868343
- report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
- report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
- report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
-
- ldp x8, x30, [sp], #16
- ret
-endfunc cortex_a76_errata_report
-#endif
+errata_report_shim cortex_a76
/* ---------------------------------------------
* This function provides cortex_a76 specific
@@ -624,4 +536,5 @@ declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
cortex_a76_reset_func, \
CPU_NO_EXTRA1_FUNC, \
cortex_a76_disable_wa_cve_2018_3639, \
+ CPU_NO_EXTRA3_FUNC, \
cortex_a76_core_pwr_dwn