Merge changes from topic "for-lts-v2.12.2" into lts-v2.12
* changes:
docs: add playbook for new releases
fix(cpus): fix clang compilation issue
fix(cpus): remove errata setting PF_MODE to conservative
chore(cpus): rearrange the errata and cve in order in Cortex-X4
chore(cpus): rearrange the errata and cve in order in Neoverse-V3
chore(cpus): rearrange cve and errata order in Cortex-X3
chore(cpus): fix cve order in Neoverse-V2
chore(cpus): rearrange the errata and cve in order in Cortex-A710
chore(cpus): rearrange the errata and cve order in Neoverse-N2
chore(cpus): rearrange cve in order in Cortex-X1
chore(cpus): fix cve order in Neoverse-V1
chore(cpus): fix cve order in Cortex-X2
chore(cpus): fix cve order in Cortex-A78C
chore(cpus): fix cve order in Cortex-A78_AE
chore(cpus): fix cve order in Cortex-A78
chore(cpus): fix cve order in Cortex-A77
refactor(cpus): don't panic if errata out of order
fix(errata): workaround for Cortex-A510 erratum 2971420
fix(cpus): workaround for Cortex-A715 erratum 2804830
fix(errata-abi): add support for handling split workarounds
refactor(cpus): declare runtime errata correctly
perf(cpus): make reset errata do fewer branches
perf(cpus): inline the init_cpu_data_ptr function
perf(cpus): inline the reset function
perf(cpus): inline the cpu_get_rev_var call
perf(cpus): inline cpu_rev_var checks
refactor(cpus): register DSU errata with the errata framework's wrappers
refactor(cpus): convert checker functions to standard helpers
refactor(cpus): convert the Cortex-A65 to use the errata framework
fix(cpus): declare reset errata correctly
fix(cpus): fix a typo in errata doc
fix(cpus): workaround for Cortex-X925 erratum 2963999
fix(cpus): workaround for Neoverse-V3 erratum 2970647
fix(cpus): workaround for Cortex-X4 erratum 2957258
feat(cpus): add ENABLE_ERRATA_ALL flag
fix(cpus): clear CPUPWRCTLR_EL1.CORE_PWRDN_EN_BIT on reset
fix(arm): create build directory before key generation
refactor(cpus): undo errata mitigations
feat(cpus): add sysreg_bit_toggle
fix(zynqmp): fix length of clock name
diff --git a/Makefile b/Makefile
index b357b1d..ea380ee 100644
--- a/Makefile
+++ b/Makefile
@@ -1226,6 +1226,7 @@
ENABLE_MPMM_FCONF \
FEATURE_DETECTION \
TRNG_SUPPORT \
+ ENABLE_ERRATA_ALL \
ERRATA_ABI_SUPPORT \
ERRATA_NON_ARM_INTERCONNECT \
CONDITIONAL_CMO \
diff --git a/bl1/bl1.mk b/bl1/bl1.mk
index a8a0061..c068ea5 100644
--- a/bl1/bl1.mk
+++ b/bl1/bl1.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -18,8 +18,7 @@
${MBEDTLS_SOURCES}
ifeq (${ARCH},aarch64)
-BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \
- lib/el3_runtime/aarch64/context.S \
+BL1_SOURCES += lib/el3_runtime/aarch64/context.S \
lib/cpus/errata_common.c
endif
diff --git a/bl2/bl2.mk b/bl2/bl2.mk
index 850d826..2a212e1 100644
--- a/bl2/bl2.mk
+++ b/bl2/bl2.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -43,10 +43,6 @@
bl2/${ARCH}/bl2_run_next_image.S \
lib/cpus/${ARCH}/cpu_helpers.S
-ifeq (${ARCH},aarch64)
-BL2_SOURCES += lib/cpus/aarch64/dsu_helpers.S
-endif
-
BL2_DEFAULT_LINKER_SCRIPT_SOURCE := bl2/bl2_el3.ld.S
endif
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 336ad2b..c427f1e 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -43,7 +43,6 @@
bl31/bl31_traps.c \
common/runtime_svc.c \
lib/cpus/errata_common.c \
- lib/cpus/aarch64/dsu_helpers.S \
plat/common/aarch64/platform_mp_stack.S \
services/arm_arch_svc/arm_arch_svc_setup.c \
services/std_svc/std_svc_setup.c \
diff --git a/docs/about/lts.rst b/docs/about/lts.rst
index f5e5f8e..5aa32a2 100644
--- a/docs/about/lts.rst
+++ b/docs/about/lts.rst
@@ -27,6 +27,8 @@
+-------------+--------------------+-------------------------------------------------------+
| 2025-01-07 | Govindraj Raja | Updates based on learnings and suggestions. |
+-------------+--------------------+-------------------------------------------------------+
+ | 2025-03-27 | Chris Palmer | Playbook for making a new release. |
+ +-------------+--------------------+-------------------------------------------------------+
This document proposes a plan for long-term support (LTS) of the |TF-A| project.
@@ -263,6 +265,40 @@
#. Monitor the mailing list for any LTS related issues
#. Propose or solicit patches to the main branch and tag them as candidates for LTS
+Playbook for new releases
+-------------------------
+To make a new minor release (e.g. 2.x.y → 2.x.y+1), follow these steps.
+
+#. Every Friday, LTS maintainers receive a triage report email (subject: “TF-A
+ LTS Triage report”) with attached CSV files, one per currently-supported
+ LTS major release branch (e.g. lts-v2.8, lts-v2.10, lts-v2.12). Each file
+ lists the patches to be cherry-picked into a new minor release of the
+ corresponding LTS branch.
+#. Run ``git fetch origin``.
+#. Run ``git checkout -b lts-v2.x.y+1 --track origin/lts-v2.x``.
+#. Run ``git log`` and verify that the most recent commit is the changelog for
+ the v2.x.y release, and that it is the commit ``origin/lts-v2.x`` points to.
+#. For the version 2.x for which you want to create a new release, open its CSV
+ file. For each patch listed, **from the bottom to the top**, run ``git
+ cherry-pick -x sha1-hash``.
+#. Some of the patches in this list may not be taken, mainly due to false
+ positives. If in doubt, this can be discussed either in the “tf-a-lts”
+ channel on Discord or during the LTS weekly meeting. There may also be
+ patches to be taken in tf-a-ci-scripts or tf-a-tests.
+#. Push the stack of changes: ``git push origin
+ HEAD:refs/for/lts-v2.x%topic=for-lts-v2.x.y+1``. You might need the
+ ``--no-verify`` option: ``git push origin --no-verify
+ HEAD:refs/for/lts-v2.x%topic=for-lts-v2.x.y+1``.
+#. The AllowCI+2 job runs automatically on each LTS branch once a new
+ cherry-picked patch or patch stack is pushed to the corresponding branch.
+ If this CI run passes, it automatically applies the Verified+1 (V+1) label
+ to each patch in the stack. The other LTS maintainers will provide the
+ Maintainer-Review+1 (MR+1) and Code-Owner-Review+1 (COR+1) votes. Once the
+ MR+1, COR+1, and V+1 votes are all present, Gerrit will automatically
+ merge the patch. LTS maintainers will then trigger a Jenkins job that
+ takes care of the release (tag, mail, and readthedocs update).
+
Execution Plan
**************
This section lists the steps needed to put the LTS system in place. However,
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index 4637908..117372f 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -311,10 +311,6 @@
- ``ERRATA_A78_1952683``: This applies errata 1952683 workaround to Cortex-A78
CPU. This needs to be enabled for revision r0p0, it is fixed in r1p0.
-- ``ERRATA_A78_2132060``: This applies errata 2132060 workaround to Cortex-A78
- CPU. This needs to be enabled for revisions r0p0, r1p0, r1p1, and r1p2. It
- is still open.
-
- ``ERRATA_A78_2242635``: This applies errata 2242635 workaround to Cortex-A78
CPU. This needs to be enabled for revisions r1p0, r1p1, and r1p2. The issue
is present in r0p0 but there is no workaround. It is still open.
@@ -377,10 +373,6 @@
Cortex-A78C CPU. This needs to be enabled for revision r0p0. The erratum is
fixed in r0p1.
-- ``ERRATA_A78C_2132064`` : This applies errata 2132064 workaround to
- Cortex-A78C CPU. This needs to be enabled for revisions r0p1, r0p2 and
- it is still open.
-
- ``ERRATA_A78C_2242638`` : This applies errata 2242638 workaround to
Cortex-A78C CPU. This needs to be enabled for revisions r0p1, r0p2 and
it is still open.
@@ -505,10 +497,6 @@
CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the
CPU. It is still open.
-- ``ERRATA_V1_2108267``: This applies errata 2108267 workaround to Neoverse-V1
- CPU. This needs to be enabled for revisions r0p0, r1p0, and r1p1 of the CPU.
- It is still open.
-
- ``ERRATA_V1_2216392``: This applies errata 2216392 workaround to Neoverse-V1
CPU. This needs to be enabled for revisions r1p0 and r1p1 of the CPU, the
issue is present in r0p0 as well but there is no workaround for that
@@ -545,10 +533,6 @@
For Neoverse V2, the following errata build flags are defined :
-- ``ERRATA_V2_2331132``: This applies errata 2331132 workaround to Neoverse-V2
- CPU. This needs to be enabled for revisions r0p0, r0p1 and r0p2. It is still
- open.
-
- ``ERRATA_V2_2618597``: This applies errata 2618597 workaround to Neoverse-V2
CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in
r0p2.
@@ -580,6 +564,9 @@
For Neoverse V3, the following errata build flags are defined :
+- ``ERRATA_V3_2970647``: This applies errata 2970647 workaround to Neoverse-V3
+ CPU. This needs to be enabled for revision r0p0. It is fixed in r0p1.
+
- ``ERRATA_V3_3701767``: This applies errata 3701767 workaround to Neoverse-V3
CPU. This needs to be enabled for revisions r0p0, r0p1, r0p2 of the CPU and
is still open.
@@ -606,10 +593,6 @@
Cortex-A710 CPU. This needs to be enabled for revision r2p0 of the CPU and
is still open.
-- ``ERRATA_A710_2058056``: This applies errata 2058056 workaround to
- Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
- and r2p1 of the CPU and is still open.
-
- ``ERRATA_A710_2267065``: This applies errata 2267065 workaround to
Cortex-A710 CPU. This needs to be enabled for revisions r0p0, r1p0 and r2p0
of the CPU and is fixed in r2p1.
@@ -683,9 +666,6 @@
- ``ERRATA_N2_2138956``: This applies errata 2138956 workaround to Neoverse-N2
CPU. This needs to be enabled for revision r0p0 of the CPU and is fixed in r0p1.
-- ``ERRATA_N2_2138953``: This applies errata 2138953 workaround to Neoverse-N2
- CPU. This needs to be enabled for revisions r0p0, r0p1, r0p2, r0p3 and is still open.
-
- ``ERRATA_N2_2242415``: This applies errata 2242415 workaround to Neoverse-N2
CPU. This needs to be enabled for revision r0p0 of the CPU and is fixed in r0p1.
@@ -749,10 +729,6 @@
CPU. This needs to be enabled for revisions r0p0, r1p0, and r2p0 of the CPU,
it is still open.
-- ``ERRATA_X2_2058056``: This applies errata 2058056 workaround to Cortex-X2
- CPU. This needs to be enabled for revisions r0p0, r1p0, r2p0 and r2p1 of the CPU,
- it is still open.
-
- ``ERRATA_X2_2083908``: This applies errata 2083908 workaround to Cortex-X2
CPU. This needs to be enabled for revision r2p0 of the CPU, it is still open.
@@ -803,10 +779,6 @@
For Cortex-X3, the following errata build flags are defined :
-- ``ERRATA_X3_2070301``: This applies errata 2070301 workaround to the Cortex-X3
- CPU. This needs to be enabled only for revisions r0p0, r1p0, r1p1 and r1p2 of
- the CPU and is still open.
-
- ``ERRATA_X3_2266875``: This applies errata 2266875 workaround to the Cortex-X3
CPU. This needs to be enabled only for revisions r0p0 and r1p0 of the CPU, it
is fixed in r1p1.
@@ -882,6 +854,9 @@
- ``ERRATA_X4_2923985``: This applies errata 2923985 workaround to Cortex-X4
CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in r0p2.
+- ``ERRATA_X4_2957258``: This applies errata 2957258 workaround to Cortex-X4
+ CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in r0p2.
+
- ``ERRATA_X4_3076789``: This applies errata 3076789 workaround to Cortex-X4
CPU. This needs to be enabled for revisions r0p0 and r0p1. It is fixed in r0p2.
@@ -891,6 +866,9 @@
For Cortex-X925, the following errata build flags are defined :
+- ``ERRATA_X925_2963999``: This applies errata 2963999 workaround to Cortex-X925
+ CPU. This needs to be enabled for revision r0p0. It is fixed in r0p1.
+
- ``ERRATA_X925_3701747``: This applies errata 3701747 workaround to Cortex-X925
CPU. This needs to be enabled for revisions r0p0 and r0p1. It is still open.
@@ -947,6 +925,10 @@
Cortex-A510 CPU. This needs to be applied to revision r0p0, r0p1, r0p2,
r0p3, r1p0, r1p1 and r1p2. It is fixed in r1p3.
+- ``ERRATA_A510_2971420``: This applies erratum 2971420 workaround to
+ Cortex-A510 CPU. This needs to be applied to revisions r0p1, r0p2, r0p3,
+ r1p0, r1p1, r1p2 and r1p3 and is still open.
+
For Cortex-A520, the following errata build flags are defined :
- ``ERRATA_A520_2630792``: This applies errata 2630792 workaround to
@@ -992,9 +974,13 @@
Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0
and r1p1. It is fixed in r1p2.
+- ``ERRATA_A715_2804830``: This applies errata 2804830 workaround to
+ Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0,
+ r1p1 and r1p2. It is fixed in r1p3.
+
- ``ERRATA_A715_3699560``: This applies errata 3699560 workaround to
Cortex-A715 CPU. This needs to be enabled for revisions r0p0, r1p0,
- r1p2, r1p3. It is still open.
+ r1p2 and r1p3. It is still open.
For Cortex-A720, the following errata build flags are defined :
@@ -1021,7 +1007,7 @@
For Cortex-A720_AE, the following errata build flags are defined :
- ``ERRATA_A720_AE_3699562``: This applies errata 3699562 workaround
- to Cortex-A715_AE CPU. This needs to be enabled for revisions r0p0.
+ to Cortex-A720_AE CPU. This needs to be enabled for revisions r0p0.
It is still open.
For Cortex-A725, the following errata build flags are defined :
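As a quick illustration of how the revision bounds quoted in the flag descriptions above are evaluated, here is a standalone C sketch (not TF-A source; the helper name is invented for illustration). It uses the same CPU_REV(r, p) = (r << 4) | p packing as the errata framework and checks the ERRATA_A510_2971420 range (r0p1 up to r1p3, inclusive) described above.

#include <stdbool.h>

/* Pack (variant, revision) the same way as TF-A's CPU_REV macro. */
#define CPU_REV(r, p)	(((r) << 4) | (p))

/* Hypothetical helper mirroring the ERRATA_A510_2971420 range above. */
static bool erratum_a510_2971420_applies(unsigned int cpu_rev_var)
{
	return (cpu_rev_var >= CPU_REV(0, 1)) && (cpu_rev_var <= CPU_REV(1, 3));
}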
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index 2ba54ea..ebbdc9b 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -247,7 +247,7 @@
- CPU initialization
- BL1 calls the ``reset_handler()`` function which in turn calls the CPU
+ BL1 calls the ``reset_handler`` macro/function which in turn calls the CPU
specific reset handler function (see the section: "CPU specific operations
framework").
@@ -1337,7 +1337,7 @@
TF-A implements a framework that allows CPU and platform ports to perform
actions very early after a CPU is released from reset in both the cold and warm
-boot paths. This is done by calling the ``reset_handler()`` function in both
+boot paths. This is done by calling the ``reset_handler`` macro/function in both
the BL1 and BL31 images. It in turn calls the platform and CPU specific reset
handling functions.
@@ -1481,7 +1481,9 @@
handling for that CPU and also any errata workarounds enabled by the platform.
It should be defined using the ``cpu_reset_func_{start,end}`` macros and its
-body may only clobber x0 to x14 with x14 being the cpu_rev parameter.
+body may only clobber x0 to x14 with x14 being the cpu_rev parameter. The cpu
+file should also include a call to ``cpu_reset_prologue`` at the start of the
+file for errata to work correctly.
CPU specific power down sequence
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2891,7 +2893,7 @@
--------------
-*Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.*
+*Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.*
.. _SMCCC: https://developer.arm.com/docs/den0028/latest
.. _PSCI: https://developer.arm.com/documentation/den0022/latest/
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index ab0b94d..bcef239 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -571,6 +571,11 @@
platform hook needs to be implemented. The value is passed as the last
component of the option ``-fstack-protector-$ENABLE_STACK_PROTECTOR``.
+- ``ENABLE_ERRATA_ALL``: Boolean option to enable the workarounds for all
+ errata that TF-A implements. It is intended for testing only; workarounds
+ should normally be enabled explicitly per platform's needs. Not recommended
+ for release builds. This option defaults to 0.
+
- ``ENCRYPT_BL31``: Binary flag to enable encryption of BL31 firmware. This
flag depends on ``DECRYPTION_SUPPORT`` build flag.
diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S
index ec2acd5..6020378 100644
--- a/include/arch/aarch64/asm_macros.S
+++ b/include/arch/aarch64/asm_macros.S
@@ -8,6 +8,7 @@
#include <arch.h>
#include <common/asm_macros_common.S>
+#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>
/*
@@ -325,4 +326,40 @@
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
.endm
+
+ /*
+ * is_feat_sysreg128_present_asm - Set flags and reg if FEAT_SYSREG128
+ * is enabled at runtime.
+ *
+ * Arguments:
+ * reg: Register for temporary use.
+ *
+ * Clobbers: reg
+ */
+ .macro is_feat_sysreg128_present_asm reg:req
+ mrs \reg, ID_AA64ISAR2_EL1
+ ands \reg, \reg, #(ID_AA64ISAR2_SYSREG128_MASK << ID_AA64ISAR2_SYSREG128_SHIFT)
+ .endm
+
+.macro call_reset_handler
+#if !(defined(IMAGE_BL2) && ENABLE_RME)
+ /* ---------------------------------------------------------------------
+ * It is a cold boot.
+ * Perform any processor specific actions upon reset e.g. cache, TLB
+ * invalidations etc.
+ * ---------------------------------------------------------------------
+ */
+ /* The plat_reset_handler can clobber x0 - x18, x30 */
+ bl plat_reset_handler
+
+ /* Get the matching cpu_ops pointer */
+ bl get_cpu_ops_ptr
+
+ /* Get the cpu_ops reset handler */
+ ldr x2, [x0, #CPU_RESET_FUNC]
+
+ /* The cpu_ops reset handler can clobber x0 - x19, x30 */
+ blr x2
+#endif
+.endm
#endif /* ASM_MACROS_S */
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index b9b0e3d..5db6831 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -293,7 +293,7 @@
* invalidations etc.
* ---------------------------------------------------------------------
*/
- bl reset_handler
+ call_reset_handler
el2_arch_init_common
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 204625c..4864596 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -49,7 +49,9 @@
* due to a NULL TPIDR_EL3.
* ---------------------------------------------------------------------
*/
- bl init_cpu_data_ptr
+ bl plat_my_core_pos
+ bl _cpu_data_by_index
+ msr tpidr_el3, x0
#endif /* IMAGE_BL31 */
/* ---------------------------------------------------------------------
@@ -219,15 +221,7 @@
msr vbar_el3, x0
isb
-#if !(defined(IMAGE_BL2) && ENABLE_RME)
- /* ---------------------------------------------------------------------
- * It is a cold boot.
- * Perform any processor specific actions upon reset e.g. cache, TLB
- * invalidations etc.
- * ---------------------------------------------------------------------
- */
- bl reset_handler
-#endif
+ call_reset_handler
el3_arch_init_common
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
index cfa5831..31f8811 100644
--- a/include/lib/cpus/aarch32/cpu_macros.S
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -48,8 +48,7 @@
* _midr:
* Numeric value expected to read from CPU's MIDR
* _resetfunc:
- * Reset function for the CPU. If there's no CPU reset function,
- * specify CPU_NO_RESET_FUNC
+ * Reset function for the CPU
* _power_down_ops:
* Comma-separated list of functions to perform power-down
* operations on the CPU. At least one, and up to
@@ -173,11 +172,6 @@
\_cpu\()_errata_list_start:
.endif
- /* unused on AArch32, maintain for portability */
- .word 0
- /* TODO(errata ABI): this prevents all checker functions from
- * being optimised away. Can be done away with unless the ABI
- * needs them */
.ifnb \_special
.word check_errata_\_special
.elseif \_cve
@@ -189,9 +183,7 @@
.word \_id
.hword \_cve
.byte \_chosen
- /* TODO(errata ABI): mitigated field for known but unmitigated
- * errata*/
- .byte 0x1
+ .byte 0x0 /* alignment */
.popsection
.endm
diff --git a/include/lib/cpus/aarch64/cortex_a510.h b/include/lib/cpus/aarch64/cortex_a510.h
index 337aac3..fb09411 100644
--- a/include/lib/cpus/aarch64/cortex_a510.h
+++ b/include/lib/cpus/aarch64/cortex_a510.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2022-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -52,4 +52,12 @@
#define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_SHIFT U(18)
#define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_WIDTH U(1)
+#ifndef __ASSEMBLER__
+
+#if ERRATA_A510_2971420
+long check_erratum_cortex_a510_2971420(long cpu_rev);
+#endif
+
+#endif /* __ASSEMBLER__ */
+
#endif /* CORTEX_A510_H */
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
index 650193c..a47a47e 100644
--- a/include/lib/cpus/aarch64/cortex_a710.h
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -52,14 +52,6 @@
#define CORTEX_A710_CPUACTLR5_EL1_BIT_44 (ULL(1) << 44)
/*******************************************************************************
- * CPU Auxiliary Control register specific definitions.
- ******************************************************************************/
-#define CORTEX_A710_CPUECTLR2_EL1 S3_0_C15_C1_5
-#define CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
-#define CPUECTLR2_EL1_PF_MODE_LSB U(11)
-#define CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
-
-/*******************************************************************************
* CPU Selected Instruction Private register specific definitions.
******************************************************************************/
#define CORTEX_A710_CPUPSELR_EL3 S3_6_C15_C8_0
diff --git a/include/lib/cpus/aarch64/cortex_a715.h b/include/lib/cpus/aarch64/cortex_a715.h
index e9bd886..9980214 100644
--- a/include/lib/cpus/aarch64/cortex_a715.h
+++ b/include/lib/cpus/aarch64/cortex_a715.h
@@ -13,20 +13,14 @@
#define CORTEX_A715_BHB_LOOP_COUNT U(38)
/*******************************************************************************
- * CPU Auxiliary Control register 1 specific definitions.
+ * CPU Register Mappings
******************************************************************************/
+#define CORTEX_A715_CPUCFR_EL1 S3_0_C15_C0_0
#define CORTEX_A715_CPUACTLR_EL1 S3_0_C15_C1_0
-
-/*******************************************************************************
- * CPU Auxiliary Control register 2 specific definitions.
- ******************************************************************************/
#define CORTEX_A715_CPUACTLR2_EL1 S3_0_C15_C1_1
-
-/*******************************************************************************
- * CPU Extended Control register specific definitions
- ******************************************************************************/
+#define CORTEX_A715_CPUACTLR3_EL1 S3_0_C15_C1_2
#define CORTEX_A715_CPUECTLR_EL1 S3_0_C15_C1_4
-
+#define CORTEX_A715_CPUECTLR2_EL1 S3_0_C15_C1_5
#define CORTEX_A715_CPUPSELR_EL3 S3_6_C15_C8_0
#define CORTEX_A715_CPUPCR_EL3 S3_6_C15_C8_1
#define CORTEX_A715_CPUPOR_EL3 S3_6_C15_C8_2
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
index 2984f82..203bdfd 100644
--- a/include/lib/cpus/aarch64/cortex_a78.h
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,9 +19,6 @@
******************************************************************************/
#define CORTEX_A78_CPUECTLR_EL1 S3_0_C15_C1_4
#define CORTEX_A78_CPUECTLR_EL1_BIT_8 (ULL(1) << 8)
-#define CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV ULL(3)
-#define CPUECTLR_EL1_PF_MODE_LSB U(6)
-#define CPUECTLR_EL1_PF_MODE_WIDTH U(2)
/*******************************************************************************
* CPU Power Control register specific definitions
diff --git a/include/lib/cpus/aarch64/cortex_a78c.h b/include/lib/cpus/aarch64/cortex_a78c.h
index d600eca..2033120 100644
--- a/include/lib/cpus/aarch64/cortex_a78c.h
+++ b/include/lib/cpus/aarch64/cortex_a78c.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,8 +24,6 @@
* CPU Extended Control register specific definitions.
******************************************************************************/
#define CORTEX_A78C_CPUECTLR_EL1 S3_0_C15_C1_4
-#define CORTEX_A78C_CPUECTLR_EL1_BIT_6 (ULL(1) << 6)
-#define CORTEX_A78C_CPUECTLR_EL1_BIT_7 (ULL(1) << 7)
#define CORTEX_A78C_CPUECTLR_EL1_MM_ASP_EN (ULL(1) << 53)
/*******************************************************************************
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
index 9ec5177..4516339 100644
--- a/include/lib/cpus/aarch64/cortex_x2.h
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -19,15 +19,6 @@
#define CORTEX_X2_CPUECTLR_EL1_PFSTIDIS_BIT (ULL(1) << 8)
/*******************************************************************************
- * CPU Extended Control register 2 specific definitions
- ******************************************************************************/
-#define CORTEX_X2_CPUECTLR2_EL1 S3_0_C15_C1_5
-
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT U(11)
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
-#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(0x9)
-
-/*******************************************************************************
* CPU Auxiliary Control register 3 specific definitions.
******************************************************************************/
#define CORTEX_X2_CPUACTLR3_EL1 S3_0_C15_C1_2
diff --git a/include/lib/cpus/aarch64/cortex_x3.h b/include/lib/cpus/aarch64/cortex_x3.h
index 8834db1..2869ec8 100644
--- a/include/lib/cpus/aarch64/cortex_x3.h
+++ b/include/lib/cpus/aarch64/cortex_x3.h
@@ -49,15 +49,6 @@
#define CORTEX_X3_CPUACTLR6_EL1 S3_0_C15_C8_1
/*******************************************************************************
- * CPU Extended Control register 2 specific definitions.
- ******************************************************************************/
-#define CORTEX_X3_CPUECTLR2_EL1 S3_0_C15_C1_5
-
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB U(11)
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
-#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(0x9)
-
-/*******************************************************************************
* CPU Auxiliary Control register 3 specific definitions.
******************************************************************************/
#define CORTEX_X3_CPUACTLR3_EL1 S3_0_C15_C1_2
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 0ce9c3c..07796c7 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -49,8 +49,7 @@
* _midr:
* Numeric value expected to read from CPU's MIDR
* _resetfunc:
- * Reset function for the CPU. If there's no CPU reset function,
- * specify CPU_NO_RESET_FUNC
+ * Reset function for the CPU.
* _extra1:
* This is a placeholder for future per CPU operations. Currently,
* some CPUs use this entry to set a test function to determine if
@@ -236,53 +235,27 @@
* _chosen:
* Compile time flag on whether the erratum is included
*
- * _apply_at_reset:
- * Whether the erratum should be automatically applied at reset
+ * _split_wa:
+ * Flag that indicates whether an erratum has a split workaround or not.
+ * Default value is 0.
*/
-.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
+.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _split_wa=0
+#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
.pushsection .rodata.errata_entries
.align 3
.ifndef \_cpu\()_errata_list_start
\_cpu\()_errata_list_start:
.endif
- /* check if unused and compile out if no references */
- .if \_apply_at_reset && \_chosen
- .quad erratum_\_cpu\()_\_id\()_wa
- .else
- .quad 0
- .endif
- /* TODO(errata ABI): this prevents all checker functions from
- * being optimised away. Can be done away with unless the ABI
- * needs them */
.quad check_erratum_\_cpu\()_\_id
/* Will fit CVEs with up to 10 character in the ID field */
.word \_id
.hword \_cve
- .byte \_chosen
- /* TODO(errata ABI): mitigated field for known but unmitigated
- * errata */
- .byte 0x1
+ /* bit magic that appends chosen field based on _split_wa */
+ .byte ((\_chosen * 0b11) & ((\_split_wa << 1) | \_chosen))
+ .byte 0x0 /* alignment */
.popsection
-.endm
-
-.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
- add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
-
- func erratum_\_cpu\()_\_id\()_wa
- mov x8, x30
-
- /* save rev_var for workarounds that might need it but don't
- * restore to x0 because few will care */
- mov x7, x0
- bl check_erratum_\_cpu\()_\_id
- cbz x0, erratum_\_cpu\()_\_id\()_skip
-.endm
-
-.macro _workaround_end _cpu:req, _id:req
- erratum_\_cpu\()_\_id\()_skip:
- ret x8
- endfunc erratum_\_cpu\()_\_id\()_wa
+#endif
.endm
/*******************************************************************************
@@ -305,14 +278,35 @@
* _chosen:
* Compile time flag on whether the erratum is included
*
+ * _split_wa:
+ * Flag that indicates whether an erratum has a split workaround or not.
+ * Default value is 0.
+ *
* in body:
* clobber x0 to x7 (please only use those)
* argument x7 - cpu_rev_var
*
* _wa clobbers: x0-x8 (PCS compliant)
*/
-.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
- _workaround_start \_cpu, \_cve, \_id, \_chosen, 1
+.macro workaround_reset_start _cpu:req, _cve:req, _id:req, \
+ _chosen:req, _split_wa=0
+
+ add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_split_wa
+
+ .if \_chosen
+ /* put errata directly into the reset function */
+ .pushsection .text.asm.\_cpu\()_reset_func, "ax"
+ .else
+ /* or something else that will get garbage collected by the
+ * linker */
+ .pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
+ .endif
+ /* revision is stored in x14, get it */
+ mov x0, x14
+ bl check_erratum_\_cpu\()_\_id
+ /* save rev_var for workarounds that might need it */
+ mov x7, x14
+ cbz x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm
/*
@@ -323,6 +317,10 @@
* for errata applied in generic code
*/
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
+ add_erratum_entry \_cpu, \_cve, \_id, \_chosen
+
+ func erratum_\_cpu\()_\_id\()_wa
+ mov x8, x30
/*
* Let errata specify if they need MIDR checking. Sadly, storing the
* MIDR in an .equ to retrieve automatically blows up as it stores some
@@ -330,11 +328,15 @@
*/
.ifnb \_midr
jump_if_cpu_midr \_midr, 1f
- b erratum_\_cpu\()_\_id\()_skip
+ b erratum_\_cpu\()_\_id\()_skip_runtime
1:
.endif
- _workaround_start \_cpu, \_cve, \_id, \_chosen, 0
+ /* save rev_var for workarounds that might need it but don't
+ * restore to x0 because few will care */
+ mov x7, x0
+ bl check_erratum_\_cpu\()_\_id
+ cbz x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm
/*
@@ -342,7 +344,8 @@
* is kept here so the same #define can be used as that macro
*/
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
- _workaround_end \_cpu, \_id
+ erratum_\_cpu\()_\_id\()_skip_reset:
+ .popsection
.endm
/*
@@ -362,7 +365,9 @@
.ifb \_no_isb
isb
.endif
- _workaround_end \_cpu, \_id
+ erratum_\_cpu\()_\_id\()_skip_runtime:
+ ret x8
+ endfunc erratum_\_cpu\()_\_id\()_wa
.endm
/*******************************************************************************
@@ -402,6 +407,18 @@
msr \_reg, x1
.endm
+/*
+ * Toggle a bit in a system register. Can toggle multiple bits but is limited by
+ * the way the EOR instruction encodes them.
+ *
+ * see sysreg_bit_set for usage
+ */
+.macro sysreg_bit_toggle _reg:req, _bit:req, _assert=1
+ mrs x1, \_reg
+ eor x1, x1, #\_bit
+ msr \_reg, x1
+.endm
+
.macro override_vector_table _table:req
adr x1, \_table
msr vbar_el3, x1
@@ -429,6 +446,29 @@
.endm
/*
+ * Extract CPU revision and variant, and combine them into a single numeric for
+ * easier comparison.
+ *
+ * _res:
+ * register where the result will be placed
+ * _tmp:
+ * register to clobber for temporaries
+ */
+.macro get_rev_var _res:req, _tmp:req
+ mrs \_tmp, midr_el1
+
+ /*
+ * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
+ * as variant[7:4] and revision[3:0] of x0.
+ *
+ * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
+ * extract x1[3:0] into x0[3:0] retaining other bits.
+ */
+ ubfx \_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
+ bfxil \_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+.endm
+
+/*
* Apply erratum
*
* _cpu:
@@ -451,7 +491,7 @@
* clobbers: x0-x10 (PCS compliant)
*/
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
- .if (\_chosen & \_get_rev)
+ .if (\_chosen && \_get_rev)
mov x9, x30
bl cpu_get_rev_var
mov x10, x0
@@ -467,8 +507,69 @@
.endm
/*
- * Helpers to select which revisions errata apply to. Don't leave a link
- * register as the cpu_rev_var_*** will call the ret and we can save on one.
+ * Helpers to report if an erratum applies. Compares the given revision variant
+ * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
+ *
+ * _rev_num: the given revision variant. Or
+ * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
+ *
+ * in body:
+ * clobber: x0
+ * argument: x0 - cpu_rev_var
+ */
+.macro cpu_rev_var_ls _rev_num:req
+ cmp x0, #\_rev_num
+ cset x0, ls
+.endm
+
+.macro cpu_rev_var_hs _rev_num:req
+ cmp x0, #\_rev_num
+ cset x0, hs
+.endm
+
+.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
+ cmp x0, #\_rev_num_lo
+ mov x1, #\_rev_num_hi
+ ccmp x0, x1, #2, hs
+ cset x0, ls
+.endm
+
+
+#if __clang_major__ < 17
+/*
+ * Clang versions older than 17 can fail to compile nested
+ * 'cfi_startproc' directives.
+ * So add compatibility variants of the func and endfunc expansions
+ * that omit `cfi_startproc` and `cfi_endproc`; these are to be used only
+ * with the check_errata/reset macros when building TF-A with clang < 17.
+ */
+
+.macro func_compat _name, _align=2
+ .section .text.asm.\_name, "ax"
+ .type \_name, %function
+ .align \_align
+ \_name:
+#if ENABLE_BTI
+ bti jc
+#endif
+.endm
+
+/*
+ * This macro is used to mark the end of a function.
+ */
+.macro endfunc_compat _name
+ .size \_name, . - \_name
+.endm
+
+#else
+
+#define func_compat func
+#define endfunc_compat endfunc
+
+#endif /* __clang_major__ < 17 */
+
+/*
+ * Helpers to select which revisions errata apply to.
*
* _cpu:
* Name of cpu as given to declare_cpu_ops
@@ -484,58 +585,73 @@
* Revision to apply to
*
* in body:
- * clobber: x0 to x4
+ * clobber: x0 to x1
* argument: x0 - cpu_rev_var
*/
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
- func check_erratum_\_cpu\()_\_id
- mov x1, #\_rev_num
- b cpu_rev_var_ls
- endfunc check_erratum_\_cpu\()_\_id
+ func_compat check_erratum_\_cpu\()_\_id
+ cpu_rev_var_ls \_rev_num
+ ret
+ endfunc_compat check_erratum_\_cpu\()_\_id
.endm
.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
- func check_erratum_\_cpu\()_\_id
- mov x1, #\_rev_num
- b cpu_rev_var_hs
- endfunc check_erratum_\_cpu\()_\_id
+ func_compat check_erratum_\_cpu\()_\_id
+ cpu_rev_var_hs \_rev_num
+ ret
+ endfunc_compat check_erratum_\_cpu\()_\_id
.endm
.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
- func check_erratum_\_cpu\()_\_id
- mov x1, #\_rev_num_lo
- mov x2, #\_rev_num_hi
- b cpu_rev_var_range
- endfunc check_erratum_\_cpu\()_\_id
+ func_compat check_erratum_\_cpu\()_\_id
+ cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
+ ret
+ endfunc_compat check_erratum_\_cpu\()_\_id
.endm
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
- func check_erratum_\_cpu\()_\_id
+ func_compat check_erratum_\_cpu\()_\_id
.if \_chosen
mov x0, #ERRATA_APPLIES
.else
mov x0, #ERRATA_MISSING
.endif
ret
- endfunc check_erratum_\_cpu\()_\_id
+ endfunc_compat check_erratum_\_cpu\()_\_id
.endm
-/* provide a shorthand for the name format for annoying errata */
+/*
+ * provide a shorthand for the name format for annoying errata
+ * body: clobber x0 to x4
+ */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
- func check_erratum_\_cpu\()_\_id
+ func_compat check_erratum_\_cpu\()_\_id
.endm
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
- endfunc check_erratum_\_cpu\()_\_id
+ endfunc_compat check_erratum_\_cpu\()_\_id
.endm
-
/*******************************************************************************
* CPU reset function wrapper
******************************************************************************/
/*
- * Wrapper to automatically apply all reset-time errata. Will end with an isb.
+ * Helper to register a cpu with the errata framework. Begins the definition of
+ * the reset function.
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ */
+.macro cpu_reset_prologue _cpu:req
+ func_compat \_cpu\()_reset_func
+ mov x15, x30
+ get_rev_var x14, x0
+.endm
+
+/*
+ * Wrapper of the reset function to automatically apply all reset-time errata.
+ * Will end with an isb.
*
* _cpu:
* Name of cpu as given to declare_cpu_ops
@@ -545,45 +661,15 @@
* argument x14 - cpu_rev_var
*/
.macro cpu_reset_func_start _cpu:req
- func \_cpu\()_reset_func
- mov x15, x30
- bl cpu_get_rev_var
- mov x14, x0
-
- /* short circuit the location to avoid searching the list */
- adrp x12, \_cpu\()_errata_list_start
- add x12, x12, :lo12:\_cpu\()_errata_list_start
- adrp x13, \_cpu\()_errata_list_end
- add x13, x13, :lo12:\_cpu\()_errata_list_end
-
- errata_begin:
- /* if head catches up with end of list, exit */
- cmp x12, x13
- b.eq errata_end
-
- ldr x10, [x12, #ERRATUM_WA_FUNC]
- /* TODO(errata ABI): check mitigated and checker function fields
- * for 0 */
- ldrb w11, [x12, #ERRATUM_CHOSEN]
-
- /* skip if not chosen */
- cbz x11, 1f
- /* skip if runtime erratum */
- cbz x10, 1f
-
- /* put cpu revision in x0 and call workaround */
- mov x0, x14
- blr x10
- 1:
- add x12, x12, #ERRATUM_ENTRY_SIZE
- b errata_begin
- errata_end:
+ /* the func/endfunc macros will change sections. So change the section
+ * back to the reset function's */
+ .section .text.asm.\_cpu\()_reset_func, "ax"
.endm
.macro cpu_reset_func_end _cpu:req
isb
ret x15
- endfunc \_cpu\()_reset_func
+ endfunc_compat \_cpu\()_reset_func
.endm
#endif /* CPU_MACROS_S */
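For reference, the ubfx/bfxil pair in the new get_rev_var macro above can be read as the following standalone C sketch (illustrative only, not TF-A source; the function name is invented). It extracts MIDR_EL1 variant[23:20] and revision[3:0] and packs them as variant[7:4] | revision[3:0], which is the encoding the CPU_REV(r, p) checker helpers compare against.

#include <stdint.h>

#define MIDR_REV_SHIFT	0
#define MIDR_REV_MASK	0xfULL
#define MIDR_VAR_SHIFT	20
#define MIDR_VAR_MASK	0xfULL

/* Illustrative C equivalent of the get_rev_var assembly macro. */
static inline unsigned int midr_to_rev_var(uint64_t midr)
{
	unsigned int var = (unsigned int)((midr >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK);
	unsigned int rev = (unsigned int)((midr >> MIDR_REV_SHIFT) & MIDR_REV_MASK);

	/* variant lands in bits [7:4], revision in bits [3:0] */
	return (var << 4) | rev;
}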
diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h
index 51fbfd1..78b3e7f 100644
--- a/include/lib/cpus/aarch64/dsu_def.h
+++ b/include/lib/cpus/aarch64/dsu_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -40,7 +40,23 @@
********************************************************************/
#define DSU_ERRATA_936184_MASK (U(0x3) << 15)
+#define CPUCFR_EL1 S3_0_C15_C0_0
+/* SCU bit of CPU Configuration Register, EL1 */
+#define SCU_SHIFT U(2)
+
#ifndef __ASSEMBLER__
-void dsu_pwr_dwn(void);
+DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrctlr_el1, CLUSTERPWRCTLR_EL1);
+
+/* ---------------------------------------------
+ * Controls power features of the cluster:
+ * 1. Do not request cache portion power
+ * 2. Disable the retention circuit
+ * ---------------------------------------------
+ */
+static inline void dsu_pwr_dwn(void)
+{
+ write_clusterpwrctlr_el1(0);
+ isb();
+}
#endif
#endif /* DSU_DEF_H */
diff --git a/include/lib/cpus/aarch64/dsu_macros.S b/include/lib/cpus/aarch64/dsu_macros.S
new file mode 100644
index 0000000..6c8cb69
--- /dev/null
+++ b/include/lib/cpus/aarch64/dsu_macros.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019-2025, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DSU_MACROS_S
+#define DSU_MACROS_S
+
+#include <asm_macros.S>
+#include <dsu_def.h>
+#include <lib/cpus/errata.h>
+
+.macro check_errata_dsu_798953_impl
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU is equal to r0p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, EQ
+.endm
+
+.macro errata_dsu_798953_wa_impl
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
+ msr CLUSTERACTLR_EL1, x0
+.endm
+
+.macro branch_if_scu_not_present _target:req
+ /* Check if the SCU L3 Unit is present on the DSU */
+ mrs x0, CPUCFR_EL1
+ ubfx x0, x0, #SCU_SHIFT, #1
+ eor x0, x0, #1
+ /* If SCU is not present, return without applying patch */
+ cmp x0, xzr
+ mov x0, #ERRATA_NOT_APPLIES
+ b.eq \_target
+.endm
+
+.macro check_errata_dsu_936184_impl
+ mov x0, #ERRATA_NOT_APPLIES
+ /* Erratum applies only if DSU has the ACP interface */
+ mrs x1, CLUSTERCFR_EL1
+ ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
+ cbz x1, 1f
+
+ /* If ACP is present, check if DSU is older than r2p0 */
+ mrs x1, CLUSTERIDR_EL1
+
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
+ b.hs 1f
+ mov x0, #ERRATA_APPLIES
+1:
+.endm
+
+.macro errata_dsu_936184_wa_impl
+ /* If erratum applies, we set a mask to a DSU control register */
+ mrs x0, CLUSTERACTLR_EL1
+ ldr x1, =DSU_ERRATA_936184_MASK
+ orr x0, x0, x1
+ msr CLUSTERACTLR_EL1, x0
+.endm
+
+.macro check_errata_dsu_2313941_impl
+ mov x2, #ERRATA_APPLIES
+ mov x3, #ERRATA_NOT_APPLIES
+
+ /* Check if DSU version is less than or equal to r3p1 */
+ mrs x1, CLUSTERIDR_EL1
+
+ mov x0, #ERRATA_NOT_APPLIES
+ /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
+ ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
+ #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
+ mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
+ cmp x0, x1
+ csel x0, x2, x3, LS
+1:
+.endm
+
+.macro errata_dsu_2313941_wa_impl
+ /* If erratum applies, disable high-level clock gating */
+ mrs x0, CLUSTERACTLR_EL1
+ orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
+ msr CLUSTERACTLR_EL1, x0
+.endm
+#endif /* DSU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
index f5837d4..e4487c4 100644
--- a/include/lib/cpus/aarch64/neoverse_n2.h
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -62,9 +62,6 @@
* CPU Auxiliary Control register specific definitions.
******************************************************************************/
#define NEOVERSE_N2_CPUECTLR2_EL1 S3_0_C15_C1_5
-#define NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
-#define CPUECTLR2_EL1_PF_MODE_LSB U(11)
-#define CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
#define CPUECTLR2_EL1_TXREQ_STATIC_FULL ULL(0)
#define CPUECTLR2_EL1_TXREQ_LSB U(0)
#define CPUECTLR2_EL1_TXREQ_WIDTH U(3)
diff --git a/include/lib/cpus/aarch64/neoverse_n_common.h b/include/lib/cpus/aarch64/neoverse_n_common.h
deleted file mode 100644
index 7cb91cd..0000000
--- a/include/lib/cpus/aarch64/neoverse_n_common.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef NEOVERSE_N_COMMON_H
-#define NEOVERSE_N_COMMON_H
-
-/******************************************************************************
- * Neoverse Nx CPU Configuration register definitions
- *****************************************************************************/
-#define CPUCFR_EL1 S3_0_C15_C0_0
-
-/* SCU bit of CPU Configuration Register, EL1 */
-#define SCU_SHIFT U(2)
-
-#endif /* NEOVERSE_N_COMMON_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
index 1e2d7ea..bbba2a7 100644
--- a/include/lib/cpus/aarch64/neoverse_v1.h
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,9 +22,6 @@
#define NEOVERSE_V1_CPUPCR_EL3 S3_6_C15_C8_1
#define NEOVERSE_V1_CPUECTLR_EL1_BIT_8 (ULL(1) << 8)
#define NEOVERSE_V1_CPUECTLR_EL1_BIT_53 (ULL(1) << 53)
-#define NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV ULL(3)
-#define CPUECTLR_EL1_PF_MODE_LSB U(6)
-#define CPUECTLR_EL1_PF_MODE_WIDTH U(2)
/*******************************************************************************
* CPU Power Control register specific definitions
diff --git a/include/lib/cpus/aarch64/neoverse_v2.h b/include/lib/cpus/aarch64/neoverse_v2.h
index 427cafa..cdbe2bb 100644
--- a/include/lib/cpus/aarch64/neoverse_v2.h
+++ b/include/lib/cpus/aarch64/neoverse_v2.h
@@ -32,9 +32,6 @@
* CPU Extended Control register 2 specific definitions.
******************************************************************************/
#define NEOVERSE_V2_CPUECTLR2_EL1 S3_0_C15_C1_5
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB U(11)
-#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
#define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_STATIC_FULL ULL(0)
#define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_LSB U(0)
#define NEOVERSE_V2_CPUECTLR2_EL1_TXREQ_WIDTH U(3)
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
index c1bdf8d..0b08919 100644
--- a/include/lib/cpus/cpu_ops.h
+++ b/include/lib/cpus/cpu_ops.h
@@ -21,8 +21,6 @@
/* The number of CPU operations allowed */
#define CPU_MAX_PWR_DWN_OPS 2
-/* Special constant to specify that CPU has no reset function */
-#define CPU_NO_RESET_FUNC 0
#if __aarch64__
#define CPU_NO_EXTRA1_FUNC 0
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
index b9166f7..8e28d46 100644
--- a/include/lib/cpus/errata.h
+++ b/include/lib/cpus/errata.h
@@ -9,20 +9,18 @@
#include <lib/cpus/cpu_ops.h>
-#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_ID_SIZE 4
#define ERRATUM_CVE_SIZE 2
#define ERRATUM_CHOSEN_SIZE 1
-#define ERRATUM_MITIGATED_SIZE 1
+#define ERRATUM_ALIGNMENT_SIZE 1
-#define ERRATUM_WA_FUNC 0
-#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE
+#define ERRATUM_CHECK_FUNC 0
#define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE
#define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE
#define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE
-#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
-#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
+#define ERRATUM_ALIGNMENT ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
+#define ERRATUM_ENTRY_SIZE ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE
/* Errata status */
#define ERRATA_NOT_APPLIES 0
@@ -39,15 +37,19 @@
* uintptr_t will reflect the change and the alignment will be correct in both.
*/
struct erratum_entry {
- uintptr_t (*wa_func)(uint64_t cpu_rev);
uintptr_t (*check_func)(uint64_t cpu_rev);
/* Will fit CVEs with up to 10 character in the ID field */
uint32_t id;
/* Denote CVEs with their year or errata with 0 */
uint16_t cve;
+ /*
+ * a bitfield:
+ * bit 0 - denotes if the erratum is enabled in build.
+ * bit 1 - denotes if the erratum workaround is split and
+ * also needs to be implemented at a lower EL.
+ */
uint8_t chosen;
- /* TODO(errata ABI): placeholder for the mitigated field */
- uint8_t _mitigated;
+ uint8_t _alignment;
} __packed;
CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,
@@ -65,10 +67,8 @@
}
#endif
-#if ERRATA_A520_2938996 || ERRATA_X4_2726228
-unsigned int check_if_affected_core(void);
-#endif
+bool check_if_trbe_disable_affected_core(void);
int check_wa_cve_2024_7881(void);
bool errata_ich_vmcr_el2_applies(void);
@@ -100,4 +100,11 @@
/* Macro to get CPU revision code for checking errata version compatibility. */
#define CPU_REV(r, p) ((r << 4) | p)
+/* Used for errata that have split workaround */
+#define SPLIT_WA 1
+
+/* chosen bitfield entries */
+#define WA_ENABLED_MASK BIT(0)
+#define SPLIT_WA_MASK BIT(1)
+
#endif /* ERRATA_H */
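To make the repurposed chosen bitfield concrete, the following standalone C sketch (not TF-A source; the mirror struct and helpers exist purely for illustration) reproduces the new entry layout and decodes the two bits using the WA_ENABLED_MASK and SPLIT_WA_MASK values defined above.

#include <stdbool.h>
#include <stdint.h>

#define WA_ENABLED_MASK	(1U << 0)	/* bit 0: workaround enabled in this build */
#define SPLIT_WA_MASK	(1U << 1)	/* bit 1: workaround split with a lower EL */

/* Mirror of the erratum_entry layout described in the diff above. */
struct erratum_entry_mirror {
	uintptr_t (*check_func)(uint64_t cpu_rev);
	uint32_t id;		/* erratum number or CVE id */
	uint16_t cve;		/* CVE year, or 0 for regular errata */
	uint8_t chosen;		/* bitfield, see masks above */
	uint8_t _alignment;	/* padding, always 0 */
} __attribute__((__packed__));

static bool erratum_is_enabled(const struct erratum_entry_mirror *e)
{
	return (e->chosen & WA_ENABLED_MASK) != 0U;
}

static bool erratum_has_split_wa(const struct erratum_entry_mirror *e)
{
	return (e->chosen & SPLIT_WA_MASK) != 0U;
}

With this encoding, chosen == 0b11 corresponds to the split-workaround case produced by the add_erratum_entry byte expression above, 0b01 to an ordinary enabled workaround, and 0 to one that is compiled out.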
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
index 8b54806..e417f45 100644
--- a/include/lib/el3_runtime/cpu_data.h
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -230,7 +230,6 @@
* APIs for initialising and accessing per-cpu data
*************************************************************************/
-void init_cpu_data_ptr(void);
void init_cpu_ops(void);
#define get_cpu_data(_m) _cpu_data()->_m
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index f4dc0d1..a424575 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -40,8 +40,11 @@
b dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
+func aem_generic_reset_func
+ bx lr
+endfunc aem_generic_reset_func
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S
index 4893a44..a53467a 100644
--- a/lib/cpus/aarch64/a64fx.S
+++ b/lib/cpus/aarch64/a64fx.S
@@ -29,12 +29,15 @@
a64fx_regs: /* The ascii list of register names to be reported */
.asciz ""
+cpu_reset_func_start a64fx
+cpu_reset_func_end a64fx
+
func a64fx_cpu_reg_dump
adr x6, a64fx_regs
ret
endfunc a64fx_cpu_reg_dump
-declare_cpu_ops a64fx, A64FX_MIDR, CPU_NO_RESET_FUNC \
+declare_cpu_ops a64fx, A64FX_MIDR, a64fx_reset_func \
a64fx_core_pwr_dwn, \
a64fx_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index d5634cf..9843943 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,8 @@
#include <asm_macros.S>
#include <cpu_macros.S>
+cpu_reset_prologue aem_generic
+
func aem_generic_core_pwr_dwn
/* ---------------------------------------------
* Disable the Data Cache.
@@ -74,6 +76,9 @@
b dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
+cpu_reset_func_start aem_generic
+cpu_reset_func_end aem_generic
+
/* ---------------------------------------------
* This function provides cpu specific
* register information for crash reporting.
@@ -94,11 +99,11 @@
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
/* cpu_ops for Foundation FVP */
-declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, aem_generic_reset_func, \
aem_generic_core_pwr_dwn, \
aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index c3d8c8d..40e6200 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,7 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+cpu_reset_prologue cortex_a35
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S
index b49d45a..d6cf69a 100644
--- a/lib/cpus/aarch64/cortex_a510.S
+++ b/lib/cpus/aarch64/cortex_a510.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <common/bl_common.h>
#include <cortex_a510.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
@@ -21,6 +22,8 @@
#error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a510
+
workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240
/* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */
sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \
@@ -180,15 +183,18 @@
check_erratum_ls cortex_a510, ERRATUM(2684597), CPU_REV(1, 2)
-/*
- * ERRATA_DSU_2313941 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a510
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a510_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_a510_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+workaround_reset_start cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941
+ errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_a510, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_a510, ERRATUM(2313941)
+ check_errata_dsu_2313941_impl
+ ret
+check_erratum_custom_end cortex_a510, ERRATUM(2313941)
+
+.global check_erratum_cortex_a510_2971420
+add_erratum_entry cortex_a510, ERRATUM(2971420), ERRATA_A510_2971420
+check_erratum_range cortex_a510, ERRATUM(2971420), CPU_REV(0, 1), CPU_REV(1, 3)
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S
index 811c836..6714a53 100644
--- a/lib/cpus/aarch64/cortex_a520.S
+++ b/lib/cpus/aarch64/cortex_a520.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,6 @@
#include <cpu_macros.S>
#include <plat_macros.S>
-/* .global erratum_cortex_a520_2938996_wa */
.global check_erratum_cortex_a520_2938996
/* Hardware handled coherency */
@@ -24,6 +23,8 @@
#error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a520
+
workaround_reset_start cortex_a520, ERRATUM(2630792), ERRATA_A520_2630792
sysreg_bit_set CORTEX_A520_CPUACTLR_EL1, BIT(38)
workaround_reset_end cortex_a520, ERRATUM(2630792)
@@ -36,23 +37,9 @@
check_erratum_ls cortex_a520, ERRATUM(2858100), CPU_REV(0, 1)
-workaround_runtime_start cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996, CORTEX_A520_MIDR
-workaround_runtime_end cortex_a520, ERRATUM(2938996)
+add_erratum_entry cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996
-check_erratum_custom_start cortex_a520, ERRATUM(2938996)
-
- /* This erratum needs to be enabled for r0p0 and r0p1.
- * Check if revision is less than or equal to r0p1.
- */
-
-#if ERRATA_A520_2938996
- mov x1, #1
- b cpu_rev_var_ls
-#else
- mov x0, #ERRATA_MISSING
-#endif
- ret
-check_erratum_custom_end cortex_a520, ERRATUM(2938996)
+check_erratum_ls cortex_a520, ERRATUM(2938996), CPU_REV(0, 1)
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
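For erratum 2938996 the open-coded revision check above collapses into the framework's declarative form: an `add_erratum_entry` to register the erratum (its workaround is applied outside this file) plus a `check_erratum_ls` bounding it to r0p0/r0p1. Roughly, for a hypothetical core and flag name:

/* workaround applied elsewhere; only register and bound it here */
add_erratum_entry cortex_foo, ERRATUM(2938996), ERRATA_FOO_2938996
check_erratum_ls cortex_foo, ERRATUM(2938996), CPU_REV(0, 1)	/* <= r0p1 */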
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index 4a5b318..dbfff87 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,6 +12,8 @@
#include <plat_macros.S>
#include <lib/cpus/errata.h>
+cpu_reset_prologue cortex_a53
+
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
@@ -36,12 +38,12 @@
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1)
/* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2)
/* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN
workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319
mrs x1, CORTEX_A53_L2ACTLR_EL1
@@ -55,7 +57,7 @@
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2)
/* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN
check_erratum_custom_start cortex_a53, ERRATUM(835769)
cmp x0, CPU_REV(0, 4)
@@ -78,7 +80,7 @@
check_erratum_custom_end cortex_a53, ERRATUM(835769)
/* workaround at build time */
-add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769
/*
* Disable the cache non-temporal hint.
@@ -114,7 +116,7 @@
check_erratum_custom_end cortex_a53, ERRATUM(843419)
/* workaround at build time */
-add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419
/*
* Earlier revisions of the core are affected as well, but don't
@@ -131,7 +133,7 @@
check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924
/* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924
cpu_reset_func_start cortex_a53
/* Enable the SMP bit. */
diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S
index d5a74e9..cf91431 100644
--- a/lib/cpus/aarch64/cortex_a55.S
+++ b/lib/cpus/aarch64/cortex_a55.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <common/bl_common.h>
#include <cortex_a55.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
@@ -19,23 +20,25 @@
.globl cortex_a55_reset_func
.globl cortex_a55_core_pwr_dwn
-/* ERRATA_DSU_798953:
- * The errata is defined in dsu_helpers.S but applies to cortex_a55
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a55_798953, check_errata_dsu_798953
-.equ erratum_cortex_a55_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a55, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+cpu_reset_prologue cortex_a55
-/* ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S but applies to cortex_a55
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a55_936184, check_errata_dsu_936184
-.equ erratum_cortex_a55_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a55, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953
+ errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a55, ERRATUM(798953)
+
+check_erratum_custom_start cortex_a55, ERRATUM(798953)
+ check_errata_dsu_798953_impl
+ ret
+check_erratum_custom_end cortex_a55, ERRATUM(798953)
+
+workaround_reset_start cortex_a55, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a55, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a55, ERRATUM(936184)
+ check_errata_dsu_936184_impl
+ ret
+check_erratum_custom_end cortex_a55, ERRATUM(936184)
workaround_reset_start cortex_a55, ERRATUM(768277), ERRATA_A55_768277
sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE
@@ -50,8 +53,7 @@
check_erratum_custom_start cortex_a55, ERRATUM(778703)
mov x16, x30
- mov x1, #0x00
- bl cpu_rev_var_ls
+ cpu_rev_var_ls CPU_REV(0, 0)
/*
* Check that no private L2 cache is configured
*/
@@ -111,7 +113,7 @@
check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923
/* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923
cpu_reset_func_start cortex_a55
cpu_reset_func_end cortex_a55
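The erratum 778703 check above also shows the inlined revision comparison: the old `mov x1, #...; bl cpu_rev_var_ls` pair becomes a single `cpu_rev_var_ls CPU_REV(r, p)` macro that leaves the result directly in x0. A sketch of a custom checker built on it (hypothetical core; x0 is assumed to carry the packed revision-variant on entry, as elsewhere in these files):

check_erratum_custom_start cortex_foo, ERRATUM(778703)
	cpu_rev_var_ls	CPU_REV(0, 0)	/* x0 := ERRATA_APPLIES iff rev-var <= r0p0 */
	ret
check_erratum_custom_end cortex_foo, ERRATUM(778703)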
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index fecb56f..adacc5c 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,6 +13,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+cpu_reset_prologue cortex_a57
+
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
@@ -81,7 +83,7 @@
/* erratum always worked around, but report it correctly */
check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0)
-add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN
workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420
sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI
@@ -95,7 +97,7 @@
check_erratum_ls cortex_a57, ERRATUM(814670), CPU_REV(0, 0)
-workaround_runtime_start cortex_a57, ERRATUM(817169), ERRATA_A57_817169, CORTEX_A57_MIDR
+workaround_runtime_start cortex_a57, ERRATUM(817169), ERRATA_A57_817169
/* Invalidate any TLB address */
mov x0, #0
tlbi vae3, x0
@@ -150,7 +152,7 @@
check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
/* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S
index 3023ecb..3c32adb 100644
--- a/lib/cpus/aarch64/cortex_a65.S
+++ b/lib/cpus/aarch64/cortex_a65.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <common/debug.h>
#include <cortex_a65.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
@@ -22,20 +23,19 @@
#error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-/* -------------------------------------------------
- * The CPU Ops reset function for Cortex-A65.
- * Shall clobber: x0-x19
- * -------------------------------------------------
- */
-func cortex_a65_reset_func
- mov x19, x30
+cpu_reset_prologue cortex_a65
-#if ERRATA_DSU_936184
- bl errata_dsu_936184_wa
-#endif
+workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a65, ERRATUM(936184)
- ret x19
-endfunc cortex_a65_reset_func
+check_erratum_custom_start cortex_a65, ERRATUM(936184)
+ check_errata_dsu_936184_impl
+ ret
+check_erratum_custom_end cortex_a65, ERRATUM(936184)
+
+cpu_reset_func_start cortex_a65
+cpu_reset_func_end cortex_a65
func cortex_a65_cpu_pwr_dwn
mrs x0, CORTEX_A65_CPUPWRCTLR_EL1
@@ -45,7 +45,6 @@
ret
endfunc cortex_a65_cpu_pwr_dwn
-
.section .rodata.cortex_a65_regs, "aS"
cortex_a65_regs: /* The ascii list of register names to be reported */
.asciz "cpuectlr_el1", ""
diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S
index 1cbb06a..dfe6f95 100644
--- a/lib/cpus/aarch64/cortex_a65ae.S
+++ b/lib/cpus/aarch64/cortex_a65ae.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <common/debug.h>
#include <cortex_a65ae.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
/* Hardware handled coherency */
@@ -22,15 +23,16 @@
#error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
- /*
- * ERRATA_DSU_936184 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a65ae
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a65ae_936184, check_errata_dsu_936184
-.equ erratum_cortex_a65ae_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue cortex_a65ae
+
+workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a65ae, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a65ae, ERRATUM(936184)
+ check_errata_dsu_936184_impl
+ ret
+check_erratum_custom_end cortex_a65ae, ERRATUM(936184)
cpu_reset_func_start cortex_a65ae
cpu_reset_func_end cortex_a65ae
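Together with the DSU conversion, the Cortex-A65/A65AE files drop their hand-written reset functions in favour of the generated skeleton: `cpu_reset_prologue` opens the per-CPU reset path, workarounds declared with `workaround_reset_start`/`_end` are picked up there automatically, and the body reduces to the `cpu_reset_func_start`/`_end` pair. A rough outline for a hypothetical core (macro behaviour inferred from its use throughout this series):

cpu_reset_prologue cortex_foo		/* opens the reset path for this core */

/* ... workaround_reset_start/_end blocks for reset-time errata ... */

cpu_reset_func_start cortex_foo
	msr	SSBS, xzr		/* example of remaining CPU-specific setup */
cpu_reset_func_end cortex_foo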
diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S
index 71ed6db..a6a1c5e 100644
--- a/lib/cpus/aarch64/cortex_a710.S
+++ b/lib/cpus/aarch64/cortex_a710.S
@@ -9,6 +9,7 @@
#include <common/bl_common.h>
#include <cortex_a710.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
@@ -28,12 +29,7 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a710, CVE(2024, 5660)
-
-check_erratum_ls cortex_a710, CVE(2024, 5660), CPU_REV(2, 1)
+cpu_reset_prologue cortex_a710
workaround_reset_start cortex_a710, ERRATUM(1987031), ERRATA_A710_1987031
ldr x0,=0x6
@@ -91,13 +87,6 @@
check_erratum_range cortex_a710, ERRATUM(2055002), CPU_REV(1, 0), CPU_REV(2, 0)
-workaround_reset_start cortex_a710, ERRATUM(2058056), ERRATA_A710_2058056
- sysreg_bitfield_insert CORTEX_A710_CPUECTLR2_EL1, CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV, \
- CPUECTLR2_EL1_PF_MODE_LSB, CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_a710, ERRATUM(2058056)
-
-check_erratum_ls cortex_a710, ERRATUM(2058056), CPU_REV(2, 1)
-
workaround_reset_start cortex_a710, ERRATUM(2081180), ERRATA_A710_2081180
ldr x0,=0x3
msr S3_6_c15_c8_0,x0
@@ -164,21 +153,23 @@
check_erratum_ls cortex_a710, ERRATUM(2282622), CPU_REV(2, 1)
+.global erratum_cortex_a710_2291219_wa
workaround_runtime_start cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
- /* Set bit 36 in ACTLR2_EL1 */
- sysreg_bit_set CORTEX_A710_CPUACTLR2_EL1, CORTEX_A710_CPUACTLR2_EL1_BIT_36
+ /* Set/unset bit 36 in ACTLR2_EL1. The first call will set it, applying
+ * the workaround. Second call clears it to undo it. */
+ sysreg_bit_toggle CORTEX_A710_CPUACTLR2_EL1, CORTEX_A710_CPUACTLR2_EL1_BIT_36
workaround_runtime_end cortex_a710, ERRATUM(2291219), NO_ISB
check_erratum_ls cortex_a710, ERRATUM(2291219), CPU_REV(2, 0)
-/*
- * ERRATA_DSU_2313941 is defined in dsu_helpers.S but applies to Cortex-A710 as
- * well. Create a symbollic link to existing errata workaround to get them
- * registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a710_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_a710_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+workaround_reset_start cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941
+ errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_a710, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_a710, ERRATUM(2313941)
+ check_errata_dsu_2313941_impl
+ ret
+check_erratum_custom_end cortex_a710, ERRATUM(2313941)
workaround_reset_start cortex_a710, ERRATUM(2371105), ERRATA_A710_2371105
/* Set bit 40 in CPUACTLR2_EL1 */
@@ -208,6 +199,10 @@
check_erratum_ls cortex_a710, ERRATUM(2778471), CPU_REV(2, 1)
+add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772
+
+check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1)
+
workaround_reset_start cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -220,9 +215,12 @@
check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772, NO_APPLY_AT_RESET
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a710, CVE(2024, 5660)
-check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1)
+check_erratum_ls cortex_a710, CVE(2024, 5660), CPU_REV(2, 1)
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
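Erratum 2291219 above also demonstrates the new `sysreg_bit_toggle` idiom for runtime workarounds that must be reversible: the exported `_wa` routine sets the control bit on its first call and clears it on the next, so a second invocation undoes the mitigation. A minimal sketch (hypothetical core; register and bit names illustrative):

.global erratum_cortex_foo_2291219_wa
workaround_runtime_start cortex_foo, ERRATUM(2291219), ERRATA_FOO_2291219
	/* first call sets the bit (apply), second call clears it (undo) */
	sysreg_bit_toggle	CORTEX_FOO_CPUACTLR2_EL1, BIT(36)
workaround_runtime_end cortex_foo, ERRATUM(2291219), NO_ISB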
diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S
index fbc73ed..dcad5b7 100644
--- a/lib/cpus/aarch64/cortex_a715.S
+++ b/lib/cpus/aarch64/cortex_a715.S
@@ -28,6 +28,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715
#endif /* WORKAROUND_CVE_2022_23960 */
+cpu_reset_prologue cortex_a715
+
workaround_reset_start cortex_a715, ERRATUM(2331818), ERRATA_A715_2331818
sysreg_bit_set CORTEX_A715_CPUACTLR2_EL1, BIT(20)
workaround_reset_end cortex_a715, ERRATUM(2331818)
@@ -117,6 +119,29 @@
check_erratum_ls cortex_a715, ERRATUM(2728106), CPU_REV(1, 1)
+workaround_reset_start cortex_a715, ERRATUM(2804830), ERRATA_A715_2804830
+ /* Workaround changes based on CORE_CACHE_PROTECTIONS field (bit 1) */
+ mrs x0, CORTEX_A715_CPUCFR_EL1
+ tbz x0, #1, wa_2804830_core_cache_prot_false
+
+ /* CORE_CACHE_PROTECTIONS==true */
+ sysreg_bit_set CORTEX_A715_CPUACTLR3_EL1, BIT(2)
+ sysreg_bit_set CORTEX_A715_CPUECTLR_EL1, BIT(23)
+ b wa_2804830_done
+
+ /* CORE_CACHE_PROTECTIONS==false */
+wa_2804830_core_cache_prot_false:
+ sysreg_bit_set CORTEX_A715_CPUECTLR2_EL1, BIT(7)
+
+wa_2804830_done:
+workaround_reset_end cortex_a715, ERRATUM(2804830)
+
+check_erratum_ls cortex_a715, ERRATUM(2804830), CPU_REV(1, 2)
+
+add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560
+
+check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3)
+
workaround_reset_start cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -129,10 +154,6 @@
check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3)
-
cpu_reset_func_start cortex_a715
/* Disable speculative loads */
msr SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index c300ea7..fee28ee 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -15,6 +15,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
#endif /* WORKAROUND_CVE_2022_23960 */
+cpu_reset_prologue cortex_a72
+
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
@@ -92,7 +94,7 @@
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
/* erratum workaround is interleaved with generic code */
-add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S
index ab2c12f..2991f93 100644
--- a/lib/cpus/aarch64/cortex_a720.S
+++ b/lib/cpus/aarch64/cortex_a720.S
@@ -22,6 +22,8 @@
#error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a720
+
.global check_erratum_cortex_a720_3699561
#if WORKAROUND_CVE_2022_23960
@@ -74,7 +76,7 @@
check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561
check_erratum_ls cortex_a720, ERRATUM(3699561), CPU_REV(0, 2)
diff --git a/lib/cpus/aarch64/cortex_a720_ae.S b/lib/cpus/aarch64/cortex_a720_ae.S
index 57a5030..c72a29e 100644
--- a/lib/cpus/aarch64/cortex_a720_ae.S
+++ b/lib/cpus/aarch64/cortex_a720_ae.S
@@ -21,9 +21,11 @@
#error "Cortex-A720AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a720_ae
+
.global check_erratum_cortex_a720_ae_3699562
-add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562
check_erratum_ls cortex_a720_ae, ERRATUM(3699562), CPU_REV(0, 0)
diff --git a/lib/cpus/aarch64/cortex_a725.S b/lib/cpus/aarch64/cortex_a725.S
index c4d6034..a8c0db2 100644
--- a/lib/cpus/aarch64/cortex_a725.S
+++ b/lib/cpus/aarch64/cortex_a725.S
@@ -21,9 +21,11 @@
#error "Cortex-A725 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a725
+
.global check_erratum_cortex_a725_3699564
-add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564
check_erratum_ls cortex_a725, ERRATUM(3699564), CPU_REV(0, 1)
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 2130ceb..d1fc6d4 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+cpu_reset_prologue cortex_a73
+
/* ---------------------------------------------
* Disable L1 data cache
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 152c81f..13599ca 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <cortex_a75.h>
#include <cpuamu.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
.global check_erratum_cortex_a75_764081
@@ -17,6 +18,8 @@
#error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
+cpu_reset_prologue cortex_a75
+
workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081
sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
workaround_reset_end cortex_a75, ERRATUM(764081)
@@ -29,23 +32,23 @@
check_erratum_ls cortex_a75, ERRATUM(790748), CPU_REV(0, 0)
-/* ERRATA_DSU_798953 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a75
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a75_798953, check_errata_dsu_798953
-.equ erratum_cortex_a75_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a75, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+workaround_reset_start cortex_a75, ERRATUM(798953), ERRATA_DSU_798953
+ errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a75, ERRATUM(798953)
-/* ERRATA_DSU_936184 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a75
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a75_936184, check_errata_dsu_936184
-.equ erratum_cortex_a75_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a75, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+check_erratum_custom_start cortex_a75, ERRATUM(798953)
+ check_errata_dsu_798953_impl
+ ret
+check_erratum_custom_end cortex_a75, ERRATUM(798953)
+
+workaround_reset_start cortex_a75, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a75, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a75, ERRATUM(936184)
+ check_errata_dsu_936184_impl
+ ret
+check_erratum_custom_end cortex_a75, ERRATUM(936184)
workaround_reset_start cortex_a75, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 017086a..822ef05 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <common/bl_common.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>
#include "wa_cve_2022_23960_bhb.S"
@@ -29,6 +30,8 @@
#define ESR_EL3_A64_SMC0 0x5e000000
#define ESR_EL3_A32_SMC0 0x4e000000
+cpu_reset_prologue cortex_a76
+
#if DYNAMIC_WORKAROUND_CVE_2018_3639
/*
* This macro applies the mitigation for CVE-2018-3639.
@@ -344,11 +347,10 @@
check_erratum_custom_start cortex_a76, ERRATUM(1286807)
#if ERRATA_A76_1286807
mov x0, #ERRATA_APPLIES
- ret
#else
- mov x1, #0x30
- b cpu_rev_var_ls
+ cpu_rev_var_ls CPU_REV(3, 0)
#endif
+ ret
check_erratum_custom_end cortex_a76, ERRATUM(1286807)
workaround_reset_start cortex_a76, ERRATUM(1791580), ERRATA_A76_1791580
@@ -419,35 +421,34 @@
check_erratum_custom_start cortex_a76, ERRATUM(1165522)
#if ERRATA_A76_1165522
mov x0, #ERRATA_APPLIES
- ret
#else
- mov x1, #0x30
- b cpu_rev_var_ls
+ cpu_rev_var_ls CPU_REV(3, 0)
#endif
+ ret
check_erratum_custom_end cortex_a76, ERRATUM(1165522)
check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
/* erratum has no workaround in the cpu. Generic code must take care */
-add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET
+add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-/* ERRATA_DSU_798953 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a76
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a76_798953, check_errata_dsu_798953
-.equ erratum_cortex_a76_798953_wa, errata_dsu_798953_wa
-add_erratum_entry cortex_a76, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET
+workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953
+ errata_dsu_798953_wa_impl
+workaround_reset_end cortex_a76, ERRATUM(798953)
-/* ERRATA_DSU_936184 :
- * The errata is defined in dsu_helpers.S but applies to cortex_a76
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_a76_936184, check_errata_dsu_936184
-.equ erratum_cortex_a76_936184_wa, errata_dsu_936184_wa
-add_erratum_entry cortex_a76, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+check_erratum_custom_start cortex_a76, ERRATUM(798953)
+ check_errata_dsu_798953_impl
+ ret
+check_erratum_custom_end cortex_a76, ERRATUM(798953)
+
+workaround_reset_start cortex_a76, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end cortex_a76, ERRATUM(936184)
+
+check_erratum_custom_start cortex_a76, ERRATUM(936184)
+ check_errata_dsu_936184_impl
+ ret
+check_erratum_custom_end cortex_a76, ERRATUM(936184)
cpu_reset_func_start cortex_a76
diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S
index 2fe3dbc..54af9a0 100644
--- a/lib/cpus/aarch64/cortex_a76ae.S
+++ b/lib/cpus/aarch64/cortex_a76ae.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,8 @@
#error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_a76ae
+
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae
#endif /* WORKAROUND_CVE_2022_23960 */
diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S
index 766bdc0..82a20ec 100644
--- a/lib/cpus/aarch64/cortex_a77.S
+++ b/lib/cpus/aarch64/cortex_a77.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -17,6 +17,8 @@
#error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
+cpu_reset_prologue cortex_a77
+
/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
@@ -26,18 +28,10 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A77_BHB_LOOP_COUNT, cortex_a77
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a77, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a77, CVE(2024, 5660)
-
-check_erratum_ls cortex_a77, CVE(2024, 5660), CPU_REV(1, 1)
-
workaround_reset_start cortex_a77, ERRATUM(1508412), ERRATA_A77_1508412
/* move cpu revision in again and compare against r0p0 */
mov x0, x7
- mov x1, #CPU_REV(0, 0)
- bl cpu_rev_var_ls
+ cpu_rev_var_ls CPU_REV(0, 0)
cbz x0, 1f
ldr x0, =0x0
@@ -149,6 +143,13 @@
check_erratum_chosen cortex_a77, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a77, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_A77_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a77, CVE(2024, 5660)
+
+check_erratum_ls cortex_a77, CVE(2024, 5660), CPU_REV(1, 1)
+
/* -------------------------------------------------
* The CPU Ops reset function for Cortex-A77. Must follow AAPCS.
* -------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S
index 9f2ffdf..b166823 100644
--- a/lib/cpus/aarch64/cortex_a78.S
+++ b/lib/cpus/aarch64/cortex_a78.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,12 +24,7 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation.Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78, CVE(2024, 5660), CPU_REV(1, 2)
+cpu_reset_prologue cortex_a78
workaround_reset_start cortex_a78, ERRATUM(1688305), ERRATA_A78_1688305
sysreg_bit_set CORTEX_A78_ACTLR2_EL1, CORTEX_A78_ACTLR2_EL1_BIT_1
@@ -104,16 +99,6 @@
check_erratum_ls cortex_a78, ERRATUM(1952683), CPU_REV(0, 0)
-workaround_reset_start cortex_a78, ERRATUM(2132060), ERRATA_A78_2132060
- /* Apply the workaround. */
- mrs x1, CORTEX_A78_CPUECTLR_EL1
- mov x0, #CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV
- bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
- msr CORTEX_A78_CPUECTLR_EL1, x1
-workaround_reset_end cortex_a78, ERRATUM(2132060)
-
-check_erratum_ls cortex_a78, ERRATUM(2132060), CPU_REV(1, 2)
-
workaround_reset_start cortex_a78, ERRATUM(2242635), ERRATA_A78_2242635
ldr x0, =0x5
msr S3_6_c15_c8_0, x0 /* CPUPSELR_EL3 */
@@ -174,6 +159,13 @@
check_erratum_chosen cortex_a78, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78, CVE(2024, 5660), CPU_REV(1, 2)
+
cpu_reset_func_start cortex_a78
#if ENABLE_FEAT_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S
index 7fa1f9b..63bc936 100644
--- a/lib/cpus/aarch64/cortex_a78_ae.S
+++ b/lib/cpus/aarch64/cortex_a78_ae.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -22,12 +22,7 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78_ae, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78_ae, CVE(2024, 5660), CPU_REV(0, 3)
+cpu_reset_prologue cortex_a78_ae
workaround_reset_start cortex_a78_ae, ERRATUM(1941500), ERRATA_A78_AE_1941500
sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
@@ -103,6 +98,13 @@
check_erratum_chosen cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78_ae, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78_ae, CVE(2024, 5660), CPU_REV(0, 3)
+
cpu_reset_func_start cortex_a78_ae
#if ENABLE_FEAT_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S
index 3f6944a..19d988e 100644
--- a/lib/cpus/aarch64/cortex_a78c.S
+++ b/lib/cpus/aarch64/cortex_a78c.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,12 +21,7 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_a78c, CVE(2024, 5660)
-
-check_erratum_ls cortex_a78c, CVE(2024, 5660), CPU_REV(0, 2)
+cpu_reset_prologue cortex_a78c
workaround_reset_start cortex_a78c, ERRATUM(1827430), ERRATA_A78C_1827430
/* Disable allocation of splintered pages in the L2 TLB */
@@ -42,18 +37,6 @@
check_erratum_ls cortex_a78c, ERRATUM(1827440), CPU_REV(0, 0)
-workaround_reset_start cortex_a78c, ERRATUM(2132064), ERRATA_A78C_2132064
- /* --------------------------------------------------------
- * Place the data prefetcher in the most conservative mode
- * to reduce prefetches by writing the following bits to
- * the value indicated: ecltr[7:6], PF_MODE = 2'b11
- * --------------------------------------------------------
- */
- sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, (CORTEX_A78C_CPUECTLR_EL1_BIT_6 | CORTEX_A78C_CPUECTLR_EL1_BIT_7)
-workaround_reset_end cortex_a78c, ERRATUM(2132064)
-
-check_erratum_range cortex_a78c, ERRATUM(2132064), CPU_REV(0, 1), CPU_REV(0, 2)
-
workaround_reset_start cortex_a78c, ERRATUM(2242638), ERRATA_A78C_2242638
ldr x0, =0x5
msr CORTEX_A78C_IMP_CPUPSELR_EL3, x0
@@ -125,6 +108,13 @@
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_a78c, CVE(2022, 23960)
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_a78c, CVE(2024, 5660)
+
+check_erratum_ls cortex_a78c, CVE(2024, 5660), CPU_REV(0, 2)
+
cpu_reset_func_start cortex_a78c
cpu_reset_func_end cortex_a78c
diff --git a/lib/cpus/aarch64/cortex_alto.S b/lib/cpus/aarch64/cortex_alto.S
new file mode 100644
index 0000000..d91b424
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_alto.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2024-2025, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <cortex_alto.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+/* Hardware handled coherency */
+#if HW_ASSISTED_COHERENCY == 0
+#error "Alto must be compiled with HW_ASSISTED_COHERENCY enabled"
+#endif
+
+cpu_reset_prologue cortex_alto
+
+cpu_reset_func_start cortex_alto
+ /* Disable speculative loads */
+ msr SSBS, xzr
+cpu_reset_func_end cortex_alto
+
+func cortex_alto_core_pwr_dwn
+ /* ---------------------------------------------------
+ * Enable CPU power down bit in power control register
+ * ---------------------------------------------------
+ */
+ sysreg_bit_set CORTEX_ALTO_IMP_CPUPWRCTLR_EL1, \
+ CORTEX_ALTO_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
+ isb
+ ret
+endfunc cortex_alto_core_pwr_dwn
+
+.section .rodata.cortex_alto_regs, "aS"
+cortex_alto_regs: /* The ASCII list of register names to be reported */
+ .asciz "cpuectlr_el1", ""
+
+func cortex_alto_cpu_reg_dump
+ adr x6, cortex_alto_regs
+ mrs x8, CORTEX_ALTO_IMP_CPUECTLR_EL1
+ ret
+endfunc cortex_alto_cpu_reg_dump
+
+declare_cpu_ops cortex_alto, CORTEX_ALTO_MIDR, \
+ cortex_alto_reset_func, \
+ cortex_alto_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_arcadia.S b/lib/cpus/aarch64/cortex_arcadia.S
index c97d87d..ae8eb91 100644
--- a/lib/cpus/aarch64/cortex_arcadia.S
+++ b/lib/cpus/aarch64/cortex_arcadia.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2024-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,8 @@
#error "Cortex-ARCADIA supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_arcadia
+
cpu_reset_func_start cortex_arcadia
/* Disable speculative loads */
msr SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S
index 891e9a6..43e09e5 100644
--- a/lib/cpus/aarch64/cortex_gelas.S
+++ b/lib/cpus/aarch64/cortex_gelas.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,12 +21,17 @@
#error "Gelas supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_gelas
+
cpu_reset_func_start cortex_gelas
/* ----------------------------------------------------
* Disable speculative loads
* ----------------------------------------------------
*/
msr SSBS, xzr
+ /* model bug: not cleared on reset */
+ sysreg_bit_clear CORTEX_GELAS_CPUPWRCTLR_EL1, \
+ CORTEX_GELAS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
cpu_reset_func_end cortex_gelas
/* ----------------------------------------------------
@@ -49,10 +54,11 @@
1:
#endif
/* ---------------------------------------------------
- * Enable CPU power down bit in power control register
+ * Flip CPU power down bit in power control register.
+ * It will be set on powerdown and cleared on wakeup
* ---------------------------------------------------
*/
- sysreg_bit_set CORTEX_GELAS_CPUPWRCTLR_EL1, \
+ sysreg_bit_toggle CORTEX_GELAS_CPUPWRCTLR_EL1, \
CORTEX_GELAS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
isb
ret
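The Gelas change pairs two uses of the bit helpers around CPUPWRCTLR: the reset function clears the power-down enable bit (which the model does not clear on reset), and the power-down routine toggles it rather than setting it, so it ends up set when powering down and cleared again on the following pass. Sketched for a hypothetical core:

cpu_reset_func_start cortex_foo
	msr	SSBS, xzr
	/* not cleared on reset by the model, so clear it explicitly */
	sysreg_bit_clear CORTEX_FOO_CPUPWRCTLR_EL1, \
		CORTEX_FOO_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
cpu_reset_func_end cortex_foo

func cortex_foo_core_pwr_dwn
	/* set on power down, cleared again on the next invocation */
	sysreg_bit_toggle CORTEX_FOO_CPUPWRCTLR_EL1, \
		CORTEX_FOO_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
	isb
	ret
endfunc cortex_foo_core_pwr_dwn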
diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S
index 5bd020c..cb759cc 100644
--- a/lib/cpus/aarch64/cortex_x1.S
+++ b/lib/cpus/aarch64/cortex_x1.S
@@ -23,12 +23,7 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x1, CVE(2024, 5660)
-
-check_erratum_ls cortex_x1, CVE(2024, 5660), CPU_REV(1, 2)
+cpu_reset_prologue cortex_x1
workaround_reset_start cortex_x1, ERRATUM(1688305), ERRATA_X1_1688305
sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(1)
@@ -60,6 +55,13 @@
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_x1, CVE(2022, 23960)
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x1, CVE(2024, 5660)
+
+check_erratum_ls cortex_x1, CVE(2024, 5660), CPU_REV(1, 2)
+
cpu_reset_func_start cortex_x1
cpu_reset_func_end cortex_x1
diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S
index c18ce3c..8af6867 100644
--- a/lib/cpus/aarch64/cortex_x2.S
+++ b/lib/cpus/aarch64/cortex_x2.S
@@ -9,6 +9,7 @@
#include <common/bl_common.h>
#include <cortex_x2.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <plat_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"
@@ -24,20 +25,11 @@
.global check_erratum_cortex_x2_3701772
-add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)
-
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x2, CVE(2024, 5660)
-
-check_erratum_ls cortex_x2, CVE(2024, 5660), CPU_REV(2, 1)
+cpu_reset_prologue cortex_x2
workaround_reset_start cortex_x2, ERRATUM(2002765), ERRATA_X2_2002765
ldr x0, =0x6
@@ -58,13 +50,6 @@
check_erratum_ls cortex_x2, ERRATUM(2017096), CPU_REV(2, 0)
-workaround_reset_start cortex_x2, ERRATUM(2058056), ERRATA_X2_2058056
- sysreg_bitfield_insert CORTEX_X2_CPUECTLR2_EL1, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
- CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT, CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_x2, ERRATUM(2058056)
-
-check_erratum_ls cortex_x2, ERRATUM(2058056), CPU_REV(2, 1)
-
workaround_reset_start cortex_x2, ERRATUM(2081180), ERRATA_X2_2081180
/* Apply instruction patching sequence */
ldr x0, =0x3
@@ -124,6 +109,15 @@
check_erratum_ls cortex_x2, ERRATUM(2282622), CPU_REV(2, 1)
+workaround_reset_start cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941
+ errata_dsu_2313941_wa_impl
+workaround_reset_end cortex_x2, ERRATUM(2313941)
+
+check_erratum_custom_start cortex_x2, ERRATUM(2313941)
+ check_errata_dsu_2313941_impl
+ ret
+check_erratum_custom_end cortex_x2, ERRATUM(2313941)
+
workaround_reset_start cortex_x2, ERRATUM(2371105), ERRATA_X2_2371105
/* Set bit 40 in CPUACTLR2_EL1 */
sysreg_bit_set CORTEX_X2_CPUACTLR2_EL1, CORTEX_X2_CPUACTLR2_EL1_BIT_40
@@ -152,6 +146,10 @@
check_erratum_ls cortex_x2, ERRATUM(2778471), CPU_REV(2, 1)
+add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772
+
+check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)
+
workaround_reset_start cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -164,15 +162,12 @@
check_erratum_chosen cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
-/*
- * ERRATA_DSU_2313941 :
- * The errata is defined in dsu_helpers.S but applies to cortex_x2
- * as well. Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_cortex_x2_2313941, check_errata_dsu_2313941
-.equ erratum_cortex_x2_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x2, CVE(2024, 5660)
+
+check_erratum_ls cortex_x2, CVE(2024, 5660), CPU_REV(2, 1)
/* ----------------------------------------------------
* HW will do the cache maintenance while powering down
diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S
index 24dbf9d..81ff9ed 100644
--- a/lib/cpus/aarch64/cortex_x3.S
+++ b/lib/cpus/aarch64/cortex_x3.S
@@ -24,27 +24,11 @@
.global check_erratum_cortex_x3_3701769
-add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)
-
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x3, CVE(2024, 5660)
-
-check_erratum_ls cortex_x3, CVE(2024, 5660), CPU_REV(1, 2)
-
-workaround_reset_start cortex_x3, ERRATUM(2070301), ERRATA_X3_2070301
- sysreg_bitfield_insert CORTEX_X3_CPUECTLR2_EL1, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV, \
- CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB, CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end cortex_x3, ERRATUM(2070301)
-
-check_erratum_ls cortex_x3, ERRATUM(2070301), CPU_REV(1, 2)
+cpu_reset_prologue cortex_x3
workaround_reset_start cortex_x3, ERRATUM(2266875), ERRATA_X3_2266875
sysreg_bit_set CORTEX_X3_CPUACTLR_EL1, BIT(22)
@@ -52,14 +36,17 @@
check_erratum_ls cortex_x3, ERRATUM(2266875), CPU_REV(1, 0)
-workaround_runtime_start cortex_x3, ERRATUM(2302506), ERRATA_X3_2302506
+workaround_reset_start cortex_x3, ERRATUM(2302506), ERRATA_X3_2302506
sysreg_bit_set CORTEX_X3_CPUACTLR2_EL1, BIT(0)
-workaround_runtime_end cortex_x3, ERRATUM(2302506), NO_ISB
+workaround_reset_end cortex_x3, ERRATUM(2302506)
check_erratum_ls cortex_x3, ERRATUM(2302506), CPU_REV(1, 1)
+.global erratum_cortex_x3_2313909_wa
workaround_runtime_start cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909
- sysreg_bit_set CORTEX_X3_CPUACTLR2_EL1, CORTEX_X3_CPUACTLR2_EL1_BIT_36
+ /* Set/unset bit 36 in ACTLR2_EL1. The first call will set it, applying
+ * the workaround. Second call clears it to undo it. */
+ sysreg_bit_toggle CORTEX_X3_CPUACTLR2_EL1, CORTEX_X3_CPUACTLR2_EL1_BIT_36
workaround_runtime_end cortex_x3, ERRATUM(2313909), NO_ISB
check_erratum_ls cortex_x3, ERRATUM(2313909), CPU_REV(1, 0)
@@ -81,9 +68,9 @@
check_erratum_ls cortex_x3, ERRATUM(2615812), CPU_REV(1, 1)
-workaround_runtime_start cortex_x3, ERRATUM(2641945), ERRATA_X3_2641945
+workaround_reset_start cortex_x3, ERRATUM(2641945), ERRATA_X3_2641945
sysreg_bit_set CORTEX_X3_CPUACTLR6_EL1, BIT(41)
-workaround_runtime_end cortex_x3, ERRATUM(2641945), NO_ISB
+workaround_reset_end cortex_x3, ERRATUM(2641945)
check_erratum_ls cortex_x3, ERRATUM(2641945), CPU_REV(1, 0)
@@ -109,6 +96,10 @@
check_erratum_ls cortex_x3, ERRATUM(2779509), CPU_REV(1, 1)
+add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769
+
+check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)
+
workaround_reset_start cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
override_vector_table wa_cve_vbar_cortex_x3
@@ -117,6 +108,13 @@
check_erratum_chosen cortex_x3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x3, CVE(2024, 5660)
+
+check_erratum_ls cortex_x3, CVE(2024, 5660), CPU_REV(1, 2)
+
workaround_reset_start cortex_x3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
/* ---------------------------------
* Sets BIT41 of CPUACTLR6_EL1 which
diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S
index fded73f..74687a9 100644
--- a/lib/cpus/aarch64/cortex_x4.S
+++ b/lib/cpus/aarch64/cortex_x4.S
@@ -22,6 +22,8 @@
#error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue cortex_x4
+
.global check_erratum_cortex_x4_2726228
.global check_erratum_cortex_x4_3701758
@@ -29,30 +31,9 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, cortex_x4
#endif /* WORKAROUND_CVE_2022_23960 */
-workaround_runtime_start cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228, CORTEX_X4_MIDR
-workaround_runtime_end cortex_x4, ERRATUM(2726228)
+add_erratum_entry cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228
-check_erratum_custom_start cortex_x4, ERRATUM(2726228)
-
- /* This erratum needs to be enabled for r0p0 and r0p1.
- * Check if revision is less than or equal to r0p1.
- */
-
-#if ERRATA_X4_2726228
- mov x1, #1
- b cpu_rev_var_ls
-#else
- mov x0, #ERRATA_MISSING
-#endif
- ret
-check_erratum_custom_end cortex_x4, ERRATUM(2726228)
-
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start cortex_x4, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set CORTEX_X4_CPUECTLR_EL1, BIT(46)
-workaround_reset_end cortex_x4, CVE(2024, 5660)
-
-check_erratum_ls cortex_x4, CVE(2024, 5660), CPU_REV(0, 2)
+check_erratum_ls cortex_x4, ERRATUM(2726228), CPU_REV(0, 1)
workaround_runtime_start cortex_x4, ERRATUM(2740089), ERRATA_X4_2740089
/* dsb before isb of power down sequence */
@@ -89,6 +70,21 @@
check_erratum_ls cortex_x4, ERRATUM(2923985), CPU_REV(0, 1)
+workaround_reset_start cortex_x4, ERRATUM(2957258), ERRATA_X4_2957258
+ /* Add ISB before MRS reads of MPIDR_EL1/MIDR_EL1 */
+ ldr x0, =0x1
+ msr S3_6_c15_c8_0, x0 /* msr CPUPSELR_EL3, X0 */
+ ldr x0, =0xd5380000
+ msr S3_6_c15_c8_2, x0 /* msr CPUPOR_EL3, X0 */
+ ldr x0, =0xFFFFFF40
+ msr S3_6_c15_c8_3,x0 /* msr CPUPMR_EL3, X0 */
+ ldr x0, =0x000080010033f
+ msr S3_6_c15_c8_1, x0 /* msr CPUPCR_EL3, X0 */
+ isb
+workaround_reset_end cortex_x4, ERRATUM(2957258)
+
+check_erratum_ls cortex_x4, ERRATUM(2957258), CPU_REV(0, 1)
+
workaround_reset_start cortex_x4, ERRATUM(3076789), ERRATA_X4_3076789
sysreg_bit_set CORTEX_X4_CPUACTLR3_EL1, BIT(14)
sysreg_bit_set CORTEX_X4_CPUACTLR3_EL1, BIT(13)
@@ -97,6 +93,10 @@
check_erratum_ls cortex_x4, ERRATUM(3076789), CPU_REV(0, 1)
+add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758
+
+check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3)
+
workaround_reset_start cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -109,6 +109,13 @@
check_erratum_chosen cortex_x4, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start cortex_x4, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set CORTEX_X4_CPUECTLR_EL1, BIT(46)
+workaround_reset_end cortex_x4, CVE(2024, 5660)
+
+check_erratum_ls cortex_x4, CVE(2024, 5660), CPU_REV(0, 2)
+
workaround_reset_start cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
/* ---------------------------------
* Sets BIT41 of CPUACTLR6_EL1 which
@@ -120,10 +127,6 @@
check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
-add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758, NO_APPLY_AT_RESET
-
-check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3)
-
cpu_reset_func_start cortex_x4
/* Disable speculative loads */
msr SSBS, xzr
diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S
index e2e70dd..7dec375 100644
--- a/lib/cpus/aarch64/cortex_x925.S
+++ b/lib/cpus/aarch64/cortex_x925.S
@@ -21,12 +21,27 @@
#error "Cortex-X925 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-.global check_erratum_cortex_x925_3701747
+cpu_reset_prologue cortex_x925
-add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747, NO_APPLY_AT_RESET
+add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747
check_erratum_ls cortex_x925, ERRATUM(3701747), CPU_REV(0, 1)
+workaround_reset_start cortex_x925, ERRATUM(2963999), ERRATA_X925_2963999
+ /* Add ISB before MRS reads of MPIDR_EL1/MIDR_EL1 */
+ ldr x0, =0x0
+ msr S3_6_c15_c8_0, x0 /* msr CPUPSELR_EL3, X0 */
+ ldr x0, =0xd5380000
+ msr S3_6_c15_c8_2, x0 /* msr CPUPOR_EL3, X0 */
+ ldr x0, =0xFFFFFF40
+ msr S3_6_c15_c8_3,x0 /* msr CPUPMR_EL3, X0 */
+ ldr x0, =0x000080010033f
+ msr S3_6_c15_c8_1, x0 /* msr CPUPCR_EL3, X0 */
+ isb
+workaround_reset_end cortex_x925, ERRATUM(2963999)
+
+check_erratum_ls cortex_x925, ERRATUM(2963999), CPU_REV(0, 0)
+
/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_x925, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_X925_CPUECTLR_EL1, BIT(46)
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 0f9a3b8..e608422 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,47 +14,6 @@
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>
- /* Reset fn is needed in BL at reset vector */
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \
- (defined(IMAGE_BL2) && RESET_TO_BL2)
- /*
- * The reset handler common to all platforms. After a matching
- * cpu_ops structure entry is found, the correponding reset_handler
- * in the cpu_ops is invoked.
- * Clobbers: x0 - x19, x30
- */
- .globl reset_handler
-func reset_handler
- mov x19, x30
-
- /* The plat_reset_handler can clobber x0 - x18, x30 */
- bl plat_reset_handler
-
- /* Get the matching cpu_ops pointer */
- bl get_cpu_ops_ptr
-
-#if ENABLE_ASSERTIONS
- /*
- * Assert if invalid cpu_ops obtained. If this is not valid, it may
- * suggest that the proper CPU file hasn't been included.
- */
- cmp x0, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the cpu_ops reset handler */
- ldr x2, [x0, #CPU_RESET_FUNC]
- mov x30, x19
- cbz x2, 1f
-
- /* The cpu_ops reset handler can clobber x0 - x19, x30 */
- br x2
-1:
- ret
-endfunc reset_handler
-
-#endif
-
#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
/*
* void prepare_cpu_pwr_dwn(unsigned int power_level)
@@ -213,83 +172,24 @@
b 1b
error_exit:
#endif
+#if ENABLE_ASSERTIONS
+ /*
+ * Assert if invalid cpu_ops obtained. If this is not valid, it may
+ * suggest that the proper CPU file hasn't been included.
+ */
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
ret
endfunc get_cpu_ops_ptr
-/*
- * Extract CPU revision and variant, and combine them into a single numeric for
- * easier comparison.
- */
.globl cpu_get_rev_var
func cpu_get_rev_var
- mrs x1, midr_el1
-
- /*
- * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
- * as variant[7:4] and revision[3:0] of x0.
- *
- * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
- * extract x1[3:0] into x0[3:0] retaining other bits.
- */
- ubfx x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
- bfxil x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
+ get_rev_var x0, x1
ret
endfunc cpu_get_rev_var
/*
- * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
- * application purposes. If the revision-variant is less than or same as a given
- * value, indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x3
- */
- .globl cpu_rev_var_ls
-func cpu_rev_var_ls
- mov x2, #ERRATA_APPLIES
- mov x3, #ERRATA_NOT_APPLIES
- cmp x0, x1
- csel x0, x2, x3, ls
- ret
-endfunc cpu_rev_var_ls
-
-/*
- * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
- * application purposes. If the revision-variant is higher than or same as a
- * given value, indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x3
- */
- .globl cpu_rev_var_hs
-func cpu_rev_var_hs
- mov x2, #ERRATA_APPLIES
- mov x3, #ERRATA_NOT_APPLIES
- cmp x0, x1
- csel x0, x2, x3, hs
- ret
-endfunc cpu_rev_var_hs
-
-/*
- * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
- * application purposes. If the revision-variant is between or includes the given
- * values, this indicates that errata applies; otherwise not.
- *
- * Shall clobber: x0-x4
- */
- .globl cpu_rev_var_range
-func cpu_rev_var_range
- mov x3, #ERRATA_APPLIES
- mov x4, #ERRATA_NOT_APPLIES
- cmp x0, x1
- csel x1, x3, x4, hs
- cbz x1, 1f
- cmp x0, x2
- csel x1, x3, x4, ls
-1:
- mov x0, x1
- ret
-endfunc cpu_rev_var_range
-
-/*
* int check_wa_cve_2017_5715(void);
*
* This function returns:
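With the `cpu_rev_var_ls`/`_hs`/`_range` functions removed from cpu_helpers.S, revision comparisons are generated in place: `cpu_get_rev_var` shrinks to the `get_rev_var` macro, and CPU files rely on the `check_erratum_ls`/`check_erratum_range` wrappers (or the inline `cpu_rev_var_ls` macro inside custom checkers) instead of branching to the deleted helpers. For example, a range check now reads:

/* applies from r0p1 up to and including r1p3 (hypothetical core) */
check_erratum_range cortex_foo, ERRATUM(2971420), CPU_REV(0, 1), CPU_REV(1, 3)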
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index ca250d3..64158e7 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,6 +13,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+cpu_reset_prologue denver
+
/* -------------------------------------------------
* CVE-2017-5715 mitigation
*
diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S
deleted file mode 100644
index 3c5bf2e..0000000
--- a/lib/cpus/aarch64/dsu_helpers.S
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <dsu_def.h>
-#include <lib/cpus/errata.h>
-
- /* -----------------------------------------------------------------------
- * DSU erratum 798953 check function
- * Checks the DSU variant, revision and configuration to determine if
- * the erratum applies. Erratum applies on all configurations of the
- * DSU and if revision-variant is r0p0.
- *
- * The erratum was fixed in r0p1.
- *
- * This function is called from both assembly and C environment. So it
- * follows AAPCS.
- *
- * Clobbers: x0-x3
- * -----------------------------------------------------------------------
- */
- .globl check_errata_dsu_798953
- .globl errata_dsu_798953_wa
- .globl dsu_pwr_dwn
-
-func check_errata_dsu_798953
- mov x2, #ERRATA_APPLIES
- mov x3, #ERRATA_NOT_APPLIES
-
- /* Check if DSU is equal to r0p0 */
- mrs x1, CLUSTERIDR_EL1
-
- /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
- ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
- #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
- mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT)
- cmp x0, x1
- csel x0, x2, x3, EQ
- ret
-endfunc check_errata_dsu_798953
-
- /* --------------------------------------------------
- * Errata Workaround for DSU erratum #798953.
- *
- * Can clobber only: x0-x8
- * --------------------------------------------------
- */
-func errata_dsu_798953_wa
- mov x8, x30
- bl check_errata_dsu_798953
- cbz x0, 1f
-
- /* If erratum applies, disable high-level clock gating */
- mrs x0, CLUSTERACTLR_EL1
- orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING
- msr CLUSTERACTLR_EL1, x0
- isb
-1:
- ret x8
-endfunc errata_dsu_798953_wa
-
- /* -----------------------------------------------------------------------
- * DSU erratum 936184 check function
- * Checks the DSU variant, revision and configuration to determine if
- * the erratum applies. Erratum applies if ACP interface is present
- * in the DSU and revision-variant < r2p0.
- *
- * The erratum was fixed in r2p0.
- *
- * This function is called from both assembly and C environment. So it
- * follows AAPCS.
- *
- * Clobbers: x0-x4
- * -----------------------------------------------------------------------
- */
- .globl check_errata_dsu_936184
- .globl errata_dsu_936184_wa
- .weak is_scu_present_in_dsu
-
- /* --------------------------------------------------------------------
- * Default behaviour respresents SCU is always present with DSU.
- * CPUs can override this definition if required.
- *
- * Can clobber only: x0-x3
- * --------------------------------------------------------------------
- */
-func is_scu_present_in_dsu
- mov x0, #1
- ret
-endfunc is_scu_present_in_dsu
-
-func check_errata_dsu_936184
- mov x4, x30
- bl is_scu_present_in_dsu
- cmp x0, xzr
- /* Default error status */
- mov x0, #ERRATA_NOT_APPLIES
-
- /* If SCU is not present, return without applying patch */
- b.eq 1f
-
- /* Erratum applies only if DSU has the ACP interface */
- mrs x1, CLUSTERCFR_EL1
- ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1
- cbz x1, 1f
-
- /* If ACP is present, check if DSU is older than r2p0 */
- mrs x1, CLUSTERIDR_EL1
-
- /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
- ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\
- #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
- cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT)
- b.hs 1f
- mov x0, #ERRATA_APPLIES
-1:
- ret x4
-endfunc check_errata_dsu_936184
-
- /* --------------------------------------------------
- * Errata Workaround for DSU erratum #936184.
- *
- * Can clobber only: x0-x8
- * --------------------------------------------------
- */
-func errata_dsu_936184_wa
- mov x8, x30
- bl check_errata_dsu_936184
- cbz x0, 1f
-
- /* If erratum applies, we set a mask to a DSU control register */
- mrs x0, CLUSTERACTLR_EL1
- ldr x1, =DSU_ERRATA_936184_MASK
- orr x0, x0, x1
- msr CLUSTERACTLR_EL1, x0
- isb
-1:
- ret x8
-endfunc errata_dsu_936184_wa
-
- /* -----------------------------------------------------------------------
- * DSU erratum 2313941 check function
- * Checks the DSU variant, revision and configuration to determine if
- * the erratum applies. Erratum applies on all configurations of the
- * DSU and if revision-variant is r0p0, r1p0, r2p0, r2p1, r3p0, r3p1.
- *
- * The erratum is still open.
- *
- * This function is called from both assembly and C environment. So it
- * follows AAPCS.
- *
- * Clobbers: x0-x4
- * -----------------------------------------------------------------------
- */
- .globl check_errata_dsu_2313941
- .globl errata_dsu_2313941_wa
-
-func check_errata_dsu_2313941
- mov x4, x30
- bl is_scu_present_in_dsu
- cmp x0, xzr
- /* Default error status */
- mov x0, #ERRATA_NOT_APPLIES
-
- /* If SCU is not present, return without applying patch */
- b.eq 1f
-
- mov x2, #ERRATA_APPLIES
- mov x3, #ERRATA_NOT_APPLIES
-
- /* Check if DSU version is less than or equal to r3p1 */
- mrs x1, CLUSTERIDR_EL1
-
- /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */
- ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\
- #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS)
- mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT)
- cmp x0, x1
- csel x0, x2, x3, LS
-1:
- ret x4
-endfunc check_errata_dsu_2313941
-
- /* --------------------------------------------------
- * Errata Workaround for DSU erratum #2313941.
- *
- * Can clobber only: x0-x8
- * --------------------------------------------------
- */
-func errata_dsu_2313941_wa
- mov x8, x30
- bl check_errata_dsu_2313941
- cbz x0, 1f
-
- /* If erratum applies, disable high-level clock gating */
- mrs x0, CLUSTERACTLR_EL1
- orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING
- msr CLUSTERACTLR_EL1, x0
- isb
-1:
- ret x8
-endfunc errata_dsu_2313941_wa
-
- /* ---------------------------------------------
- * controls power features of the cluster
- * 1. Cache portion power not request
- * 2. Disable the retention circuit
- * ---------------------------------------------
- */
-func dsu_pwr_dwn
- msr CLUSTERPWRCTLR_EL1, xzr
- isb
- ret
-endfunc dsu_pwr_dwn
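
The helpers deleted here live on as macros (errata_dsu_*_wa_impl, check_errata_dsu_*_impl, branch_if_scu_not_present) pulled in via dsu_macros.S by the CPU files below. As a reference for what those checks decide, here is a rough C restatement of the conditions spelled out in the deleted comments; the SCU presence test is handled separately by branch_if_scu_not_present and is omitted, and the register accessors plus the ACP bit position are assumptions, not the real dsu_def.h definitions.

    /* Rough sketch of the DSU erratum applicability checks. */
    #include <stdbool.h>
    #include <stdint.h>

    extern uint64_t read_clusteridr_el1(void);  /* hypothetical MRS wrapper */
    extern uint64_t read_clustercfr_el1(void);  /* hypothetical MRS wrapper */

    /* CLUSTERIDR_EL1 keeps variant and revision adjacent: (var << 4) | rev. */
    static inline uint32_t dsu_rev_var(void)
    {
        return (uint32_t)(read_clusteridr_el1() & 0xffU);
    }

    /* 798953: any DSU configuration at r0p0 (fixed in r0p1). */
    static bool dsu_798953_applies(void)
    {
        return dsu_rev_var() == 0x00U;
    }

    /* 936184: DSU has an ACP interface and is older than r2p0. */
    static bool dsu_936184_applies(void)
    {
        bool has_acp = ((read_clustercfr_el1() >> 11) & 1U) != 0U; /* ACP bit assumed at 11 */

        return has_acp && (dsu_rev_var() < 0x20U);
    }

    /* 2313941: any DSU configuration up to and including r3p1 (still open). */
    static bool dsu_2313941_applies(void)
    {
        return dsu_rev_var() <= 0x31U;
    }
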
diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S
index 5d7a857..0a10eed 100644
--- a/lib/cpus/aarch64/generic.S
+++ b/lib/cpus/aarch64/generic.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>
+cpu_reset_prologue generic
+
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
@@ -80,7 +82,9 @@
* ---------------------------------------------
*/
.equ generic_cpu_reg_dump, 0
-.equ generic_reset_func, 0
+
+cpu_reset_func_start generic
+cpu_reset_func_end generic
declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \
generic_reset_func, \
diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S
index 4bc95d0..f37bb28 100644
--- a/lib/cpus/aarch64/neoverse_e1.S
+++ b/lib/cpus/aarch64/neoverse_e1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#include <asm_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
+#include <dsu_macros.S>
#include <neoverse_e1.h>
#include <cpu_macros.S>
#include <plat_macros.S>
@@ -21,15 +22,18 @@
#error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-/*
- * ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S and applies to neoverse_e1.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184
-.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa
-add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue neoverse_e1
+
+workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end neoverse_e1, ERRATUM(936184)
+
+check_erratum_custom_start neoverse_e1, ERRATUM(936184)
+ branch_if_scu_not_present 2f /* label 1 is used in the macro */
+ check_errata_dsu_936184_impl
+ 2:
+ ret
+check_erratum_custom_end neoverse_e1, ERRATUM(936184)
cpu_reset_func_start neoverse_e1
cpu_reset_func_end neoverse_e1
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index f727226..5868bf2 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <asm_macros.S>
#include <cpuamu.h>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <neoverse_n1.h>
#include "wa_cve_2022_23960_bhb_vector.S"
@@ -27,15 +28,18 @@
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
#endif /* WORKAROUND_CVE_2022_23960 */
-/*
- * ERRATA_DSU_936184:
- * The errata is defined in dsu_helpers.S and applies to Neoverse N1.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_n1_936184, check_errata_dsu_936184
-.equ erratum_neoverse_n1_936184_wa, errata_dsu_936184_wa
-add_erratum_entry neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET
+cpu_reset_prologue neoverse_n1
+
+workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184
+ errata_dsu_936184_wa_impl
+workaround_reset_end neoverse_n1, ERRATUM(936184)
+
+check_erratum_custom_start neoverse_n1, ERRATUM(936184)
+ branch_if_scu_not_present 2f /* label 1 is used in the macro */
+ check_errata_dsu_936184_impl
+ 2:
+ ret
+check_erratum_custom_end neoverse_n1, ERRATUM(936184)
workaround_reset_start neoverse_n1, ERRATUM(1043202), ERRATA_N1_1043202
/* Apply instruction patching sequence */
@@ -112,7 +116,7 @@
check_erratum_ls neoverse_n1, ERRATUM(1315703), CPU_REV(3, 0)
-workaround_reset_start neoverse_n1, ERRATUM(1542419), ERRATA_N1_1542419
+workaround_reset_start neoverse_n1, ERRATUM(1542419), ERRATA_N1_1542419, SPLIT_WA
/* Apply instruction patching sequence */
ldr x0, =0x0
msr CPUPSELR_EL3, x0
diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S
index 3df3839..7d9d7f1 100644
--- a/lib/cpus/aarch64/neoverse_n2.S
+++ b/lib/cpus/aarch64/neoverse_n2.S
@@ -7,6 +7,7 @@
#include <arch.h>
#include <asm_macros.S>
#include <cpu_macros.S>
+#include <dsu_macros.S>
#include <neoverse_n2.h>
#include "wa_cve_2022_23960_bhb_vector.S"
@@ -22,30 +23,11 @@
.global check_erratum_neoverse_n2_3701773
-add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773, NO_APPLY_AT_RESET
-
-check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)
-
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
#endif /* WORKAROUND_CVE_2022_23960 */
-/*
- * ERRATA_DSU_2313941:
- * The errata is defined in dsu_helpers.S and applies to Neoverse N2.
- * Henceforth creating symbolic names to the already existing errata
- * workaround functions to get them registered under the Errata Framework.
- */
-.equ check_erratum_neoverse_n2_2313941, check_errata_dsu_2313941
-.equ erratum_neoverse_n2_2313941_wa, errata_dsu_2313941_wa
-add_erratum_entry neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET
-
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_n2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_n2, CVE(2024, 5660)
-
-check_erratum_ls neoverse_n2, CVE(2024, 5660), CPU_REV(0, 3)
+cpu_reset_prologue neoverse_n2
workaround_reset_start neoverse_n2, ERRATUM(2002655), ERRATA_N2_2002655
/* Apply instruction patching sequence */
@@ -69,18 +51,6 @@
check_erratum_ls neoverse_n2, ERRATUM(2002655), CPU_REV(0, 0)
-workaround_reset_start neoverse_n2, ERRATUM(2025414), ERRATA_N2_2025414
- sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
-workaround_reset_end neoverse_n2, ERRATUM(2025414)
-
-check_erratum_ls neoverse_n2, ERRATUM(2025414), CPU_REV(0, 0)
-
-workaround_reset_start neoverse_n2, ERRATUM(2067956), ERRATA_N2_2067956
- sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
-workaround_reset_end neoverse_n2, ERRATUM(2067956)
-
-check_erratum_ls neoverse_n2, ERRATUM(2067956), CPU_REV(0, 0)
-
workaround_runtime_start neoverse_n2, ERRATUM(2009478), ERRATA_N2_2009478
/* Stash ERRSELR_EL1 in x2 */
mrs x2, ERRSELR_EL1
@@ -97,15 +67,17 @@
check_erratum_ls neoverse_n2, ERRATUM(2009478), CPU_REV(0, 0)
-workaround_reset_start neoverse_n2, ERRATUM(2138953), ERRATA_N2_2138953
- /* Apply instruction patching sequence */
- mrs x1, NEOVERSE_N2_CPUECTLR2_EL1
- mov x0, #NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV
- bfi x1, x0, #CPUECTLR2_EL1_PF_MODE_LSB, #CPUECTLR2_EL1_PF_MODE_WIDTH
- msr NEOVERSE_N2_CPUECTLR2_EL1, x1
-workaround_reset_end neoverse_n2, ERRATUM(2138953)
+workaround_reset_start neoverse_n2, ERRATUM(2025414), ERRATA_N2_2025414
+ sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT
+workaround_reset_end neoverse_n2, ERRATUM(2025414)
-check_erratum_ls neoverse_n2, ERRATUM(2138953), CPU_REV(0, 3)
+check_erratum_ls neoverse_n2, ERRATUM(2025414), CPU_REV(0, 0)
+
+workaround_reset_start neoverse_n2, ERRATUM(2067956), ERRATA_N2_2067956
+ sysreg_bit_set NEOVERSE_N2_CPUACTLR_EL1, NEOVERSE_N2_CPUACTLR_EL1_BIT_46
+workaround_reset_end neoverse_n2, ERRATUM(2067956)
+
+check_erratum_ls neoverse_n2, ERRATUM(2067956), CPU_REV(0, 0)
workaround_reset_start neoverse_n2, ERRATUM(2138956), ERRATA_N2_2138956
/* Apply instruction patching sequence */
@@ -171,27 +143,40 @@
check_erratum_ls neoverse_n2, ERRATUM(2280757), CPU_REV(0, 0)
+workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941
+ errata_dsu_2313941_wa_impl
+workaround_reset_end neoverse_n2, ERRATUM(2313941)
+
+check_erratum_custom_start neoverse_n2, ERRATUM(2313941)
+ branch_if_scu_not_present 2f /* label 1 is used in the macro */
+ check_errata_dsu_2313941_impl
+ 2:
+ ret
+check_erratum_custom_end neoverse_n2, ERRATUM(2313941)
+
+.global erratum_neoverse_n2_2326639_wa
workaround_runtime_start neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639
- /* Set bit 36 in ACTLR2_EL1 */
- sysreg_bit_set NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_36
+ /* Set/unset bit 36 in ACTLR2_EL1. The first call will set it, applying
+ * the workaround. Second call clears it to undo it. */
+ sysreg_bit_toggle NEOVERSE_N2_CPUACTLR2_EL1, NEOVERSE_N2_CPUACTLR2_EL1_BIT_36
workaround_runtime_end neoverse_n2, ERRATUM(2326639)
check_erratum_ls neoverse_n2, ERRATUM(2326639), CPU_REV(0, 0)
-workaround_runtime_start neoverse_n2, ERRATUM(2340933), ERRATA_N2_2340933
+workaround_reset_start neoverse_n2, ERRATUM(2340933), ERRATA_N2_2340933
/* Set bit 61 in CPUACTLR5_EL1 */
sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, BIT(61)
-workaround_runtime_end neoverse_n2, ERRATUM(2340933)
+workaround_reset_end neoverse_n2, ERRATUM(2340933)
check_erratum_ls neoverse_n2, ERRATUM(2340933), CPU_REV(0, 0)
-workaround_runtime_start neoverse_n2, ERRATUM(2346952), ERRATA_N2_2346952
+workaround_reset_start neoverse_n2, ERRATUM(2346952), ERRATA_N2_2346952
/* Set TXREQ to STATIC and full L2 TQ size */
mrs x1, NEOVERSE_N2_CPUECTLR2_EL1
mov x0, #CPUECTLR2_EL1_TXREQ_STATIC_FULL
bfi x1, x0, #CPUECTLR2_EL1_TXREQ_LSB, #CPUECTLR2_EL1_TXREQ_WIDTH
msr NEOVERSE_N2_CPUECTLR2_EL1, x1
-workaround_runtime_end neoverse_n2, ERRATUM(2346952)
+workaround_reset_end neoverse_n2, ERRATUM(2346952)
check_erratum_ls neoverse_n2, ERRATUM(2346952), CPU_REV(0, 2)
@@ -234,6 +219,10 @@
check_erratum_ls neoverse_n2, ERRATUM(2779511), CPU_REV(0, 2)
+add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773
+
+check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)
+
workaround_reset_start neoverse_n2, CVE(2022,23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -246,6 +235,13 @@
check_erratum_chosen neoverse_n2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_n2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set NEOVERSE_N2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_n2, CVE(2024, 5660)
+
+check_erratum_ls neoverse_n2, CVE(2024, 5660), CPU_REV(0, 3)
+
/* -------------------------------------------
* The CPU Ops reset function for Neoverse N2.
* -------------------------------------------
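
Note how erratum 2326639 is handled above: the workaround body now uses sysreg_bit_toggle, so the same routine both applies and reverses the mitigation. The first call (on the powerdown path) sets CPUACTLR2_EL1 bit 36; if the WFI turns out not to be terminal, the call in the psci_helpers.S hunk further down runs it again and clears the bit. A minimal sketch of the toggle idea, with hypothetical accessors standing in for the MRS/MSR pair:

    #include <stdint.h>

    #define CPUACTLR2_BIT_36 (1ULL << 36)

    extern uint64_t read_cpuactlr2_el1(void);       /* hypothetical */
    extern void write_cpuactlr2_el1(uint64_t val);  /* hypothetical */

    static void n2_2326639_toggle(void)
    {
        /* First call sets the bit (workaround applied); a second call clears it. */
        write_cpuactlr2_el1(read_cpuactlr2_el1() ^ CPUACTLR2_BIT_36);
    }
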
diff --git a/lib/cpus/aarch64/neoverse_n3.S b/lib/cpus/aarch64/neoverse_n3.S
index 8abcafe..1b7a3e1 100644
--- a/lib/cpus/aarch64/neoverse_n3.S
+++ b/lib/cpus/aarch64/neoverse_n3.S
@@ -21,9 +21,11 @@
#error "Neoverse-N3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue neoverse_n3
+
.global check_erratum_neoverse_n3_3699563
-add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563, NO_APPLY_AT_RESET
+add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563
check_erratum_ls neoverse_n3, ERRATUM(3699563), CPU_REV(0, 0)
diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S
deleted file mode 100644
index b816342..0000000
--- a/lib/cpus/aarch64/neoverse_n_common.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <neoverse_n_common.h>
-
- .global is_scu_present_in_dsu
-
-/*
- * Check if the SCU L3 Unit is present on the DSU
- * 1-> SCU present
- * 0-> SCU not present
- *
- * This function is implemented as weak on dsu_helpers.S and must be
- * overwritten for Neoverse Nx cores.
- */
-
-func is_scu_present_in_dsu
- mrs x0, CPUCFR_EL1
- ubfx x0, x0, #SCU_SHIFT, #1
- eor x0, x0, #1
- ret
-endfunc is_scu_present_in_dsu
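
The Neoverse-specific override deleted here derived SCU presence from CPUCFR_EL1: the SCU bit reads as 0 when an SCU/L3 is configured, so the result is inverted before returning. A short C restatement, where the accessor is a placeholder and the bit position is assumed to be the SCU_SHIFT value from the old header:

    #include <stdbool.h>
    #include <stdint.h>

    #define SCU_SHIFT 2U                    /* assumed bit position */

    extern uint64_t read_cpucfr_el1(void);  /* hypothetical MRS wrapper */

    static bool scu_present_in_dsu(void)
    {
        /* CPUCFR_EL1.SCU is 1 when no SCU/L3 is present, hence the inversion. */
        return (((read_cpucfr_el1() >> SCU_SHIFT) & 1U) ^ 1U) != 0U;
    }
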
diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S
index d1a2c24..f975be0 100644
--- a/lib/cpus/aarch64/neoverse_v1.S
+++ b/lib/cpus/aarch64/neoverse_v1.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -22,17 +22,12 @@
#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue neoverse_v1
+
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v1, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v1, CVE(2024, 5660), CPU_REV(1, 2)
-
workaround_reset_start neoverse_v1, ERRATUM(1618635), ERRATA_V1_1618635
/* Inserts a DMB SY before and after MRS PAR_EL1 */
ldr x0, =0x0
@@ -161,15 +156,6 @@
check_erratum_range neoverse_v1, ERRATUM(1966096), CPU_REV(1, 0), CPU_REV(1, 1)
-workaround_reset_start neoverse_v1, ERRATUM(2108267), ERRATA_V1_2108267
- mrs x1, NEOVERSE_V1_CPUECTLR_EL1
- mov x0, #NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV
- bfi x1, x0, #CPUECTLR_EL1_PF_MODE_LSB, #CPUECTLR_EL1_PF_MODE_WIDTH
- msr NEOVERSE_V1_CPUECTLR_EL1, x1
-workaround_reset_end neoverse_v1, ERRATUM(2108267)
-
-check_erratum_ls neoverse_v1, ERRATUM(2108267), CPU_REV(1, 2)
-
workaround_reset_start neoverse_v1, ERRATUM(2139242), ERRATA_V1_2139242
mov x0, #0x3
msr S3_6_C15_C8_0, x0
@@ -203,10 +189,10 @@
check_erratum_ls neoverse_v1, ERRATUM(2294912), CPU_REV(1, 2)
-workaround_runtime_start neoverse_v1, ERRATUM(2348377), ERRATA_V1_2348377
+workaround_reset_start neoverse_v1, ERRATUM(2348377), ERRATA_V1_2348377
/* Set bit 61 in CPUACTLR5_EL1 */
sysreg_bit_set NEOVERSE_V1_ACTLR5_EL1, NEOVERSE_V1_ACTLR5_EL1_BIT_61
-workaround_runtime_end neoverse_v1, ERRATUM(2348377)
+workaround_reset_end neoverse_v1, ERRATUM(2348377)
check_erratum_ls neoverse_v1, ERRATUM(2348377), CPU_REV(1, 1)
@@ -250,6 +236,13 @@
check_erratum_chosen neoverse_v1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set NEOVERSE_V1_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v1, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v1, CVE(2024, 5660), CPU_REV(1, 2)
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S
index b43f6dd..ce84942 100644
--- a/lib/cpus/aarch64/neoverse_v2.S
+++ b/lib/cpus/aarch64/neoverse_v2.S
@@ -22,19 +22,7 @@
#error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v2, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v2, CVE(2024, 5660), CPU_REV(0, 2)
-
-workaround_reset_start neoverse_v2, ERRATUM(2331132), ERRATA_V2_2331132
- sysreg_bitfield_insert NEOVERSE_V2_CPUECTLR2_EL1, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV, \
- NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB, NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH
-workaround_reset_end neoverse_v2, ERRATUM(2331132)
-
-check_erratum_ls neoverse_v2, ERRATUM(2331132), CPU_REV(0, 2)
+cpu_reset_prologue neoverse_v2
workaround_reset_start neoverse_v2, ERRATUM(2618597), ERRATA_V2_2618597
/* Disable retention control for WFI and WFE. */
@@ -93,6 +81,13 @@
check_erratum_chosen neoverse_v2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v2, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v2, CVE(2024, 5660), CPU_REV(0, 2)
+
#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V2_BHB_LOOP_COUNT, neoverse_v2
#endif /* WORKAROUND_CVE_2022_23960 */
diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S
index 7fe2d7f..2ead062 100644
--- a/lib/cpus/aarch64/neoverse_v3.S
+++ b/lib/cpus/aarch64/neoverse_v3.S
@@ -22,9 +22,26 @@
#error "Neoverse V3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue neoverse_v3
+
.global check_erratum_neoverse_v3_3701767
-add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767, NO_APPLY_AT_RESET
+workaround_reset_start neoverse_v3, ERRATUM(2970647), ERRATA_V3_2970647
+ /* Add ISB before MRS reads of MPIDR_EL1/MIDR_EL1 */
+ ldr x0, =0x1
+ msr S3_6_c15_c8_0, x0 /* msr CPUPSELR_EL3, X0 */
+ ldr x0, =0xd5380000
+ msr S3_6_c15_c8_2, x0 /* msr CPUPOR_EL3, X0 */
+ ldr x0, =0xFFFFFF40
+ msr S3_6_c15_c8_3, x0 /* msr CPUPMR_EL3, X0 */
+ ldr x0, =0x000080010033f
+ msr S3_6_c15_c8_1, x0 /* msr CPUPCR_EL3, X0 */
+ isb
+workaround_reset_end neoverse_v3, ERRATUM(2970647)
+
+check_erratum_ls neoverse_v3, ERRATUM(2970647), CPU_REV(0, 0)
+
+add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767
check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2)
@@ -32,13 +49,6 @@
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V3_BHB_LOOP_COUNT, neoverse_v3
#endif /* WORKAROUND_CVE_2022_23960 */
-/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
-workaround_reset_start neoverse_v3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
- sysreg_bit_set NEOVERSE_V3_CPUECTLR_EL1, BIT(46)
-workaround_reset_end neoverse_v3, CVE(2024, 5660)
-
-check_erratum_ls neoverse_v3, CVE(2024, 5660), CPU_REV(0, 1)
-
workaround_reset_start neoverse_v3, CVE(2022,23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
/*
@@ -52,6 +62,13 @@
check_erratum_chosen neoverse_v3, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
+/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
+workaround_reset_start neoverse_v3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
+ sysreg_bit_set NEOVERSE_V3_CPUECTLR_EL1, BIT(46)
+workaround_reset_end neoverse_v3, CVE(2024, 5660)
+
+check_erratum_ls neoverse_v3, CVE(2024, 5660), CPU_REV(0, 1)
+
workaround_reset_start neoverse_v3, CVE(2024, 7881), WORKAROUND_CVE_2024_7881
/* ---------------------------------
* Sets BIT41 of CPUACTLR6_EL1 which
diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S
index 0180ab7..0d04e65 100644
--- a/lib/cpus/aarch64/nevis.S
+++ b/lib/cpus/aarch64/nevis.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,8 @@
#error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue nevis
+
cpu_reset_func_start nevis
/* ----------------------------------------------------
* Disable speculative loads
diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S
index fb03cf1..a727379 100644
--- a/lib/cpus/aarch64/qemu_max.S
+++ b/lib/cpus/aarch64/qemu_max.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,8 @@
#include <cpu_macros.S>
#include <qemu_max.h>
+cpu_reset_prologue qemu_max
+
func qemu_max_core_pwr_dwn
/* ---------------------------------------------
* Disable the Data Cache.
@@ -47,6 +49,9 @@
b dcsw_op_all
endfunc qemu_max_cluster_pwr_dwn
+cpu_reset_func_start qemu_max
+cpu_reset_func_end qemu_max
+
/* ---------------------------------------------
* This function provides cpu specific
* register information for crash reporting.
@@ -67,6 +72,6 @@
/* cpu_ops for QEMU MAX */
-declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \
+declare_cpu_ops qemu_max, QEMU_MAX_MIDR, qemu_max_reset_func, \
qemu_max_core_pwr_dwn, \
qemu_max_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S
index ea687be..9ad9362 100644
--- a/lib/cpus/aarch64/rainier.S
+++ b/lib/cpus/aarch64/rainier.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,8 @@
#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue rainier
+
/* --------------------------------------------------
* Disable speculative loads if Rainier supports
* SSBS.
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S
index e8b3860..2d61121 100644
--- a/lib/cpus/aarch64/travis.S
+++ b/lib/cpus/aarch64/travis.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,12 +21,17 @@
#error "Travis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif
+cpu_reset_prologue travis
+
cpu_reset_func_start travis
/* ----------------------------------------------------
* Disable speculative loads
* ----------------------------------------------------
*/
msr SSBS, xzr
+ /* model bug: not cleared on reset */
+ sysreg_bit_clear TRAVIS_IMP_CPUPWRCTLR_EL1, \
+ TRAVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
cpu_reset_func_end travis
func travis_core_pwr_dwn
@@ -45,10 +50,11 @@
1:
#endif
/* ---------------------------------------------------
- * Enable CPU power down bit in power control register
+ * Flip CPU power down bit in power control register.
+ * It will be set on powerdown and cleared on wakeup
* ---------------------------------------------------
*/
- sysreg_bit_set TRAVIS_IMP_CPUPWRCTLR_EL1, \
+ sysreg_bit_toggle TRAVIS_IMP_CPUPWRCTLR_EL1, \
TRAVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
isb
ret
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index fb904e2..4b8de00 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -315,10 +315,6 @@
# to revision r0p0 of the A78 cpu and was fixed in the revision r1p0.
CPU_FLAG_LIST += ERRATA_A78_1952683
-# Flag to apply erratum 2132060 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r1p1, and r1p2 of the A78 cpu. It is still open.
-CPU_FLAG_LIST += ERRATA_A78_2132060
-
# Flag to apply erratum 2242635 workaround during reset. This erratum applies
# to revisions r1p0, r1p1, and r1p2 of the A78 cpu and is open. The issue is
# present in r0p0 as well but there is no workaround for that revision.
@@ -380,10 +376,6 @@
# It is still open.
CPU_FLAG_LIST += ERRATA_A78_AE_2712574
-# Flag to apply erratum 2132064 workaround during reset. This erratum applies
-# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
-CPU_FLAG_LIST += ERRATA_A78C_2132064
-
# Flag to apply erratum 2242638 workaround during reset. This erratum applies
# to revisions r0p1 and r0p2 of the A78C cpu. It is still open.
CPU_FLAG_LIST += ERRATA_A78C_2242638
@@ -523,10 +515,6 @@
# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
CPU_FLAG_LIST += ERRATA_V1_2139242
-# Flag to apply erratum 2108267 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, and r1p1 of the Neoverse V1 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_V1_2108267
-
# Flag to apply erratum 2216392 workaround during reset. This erratum applies
# to revisions r1p0 and r1p1 of the Neoverse V1 cpu and is still open. This
# issue exists in r0p0 as well but there is no workaround for that revision.
@@ -564,6 +552,10 @@
# still open.
CPU_FLAG_LIST += ERRATA_V1_2779461
+# Flag to apply erratum 2970647 workaround during reset. This erratum applies
+# to revision r0p0 of the Neoverse V3 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_V3_2970647
+
# Flag to apply erratum 3701767 workaround during context save/restore of
# ICH_VMCR_EL2 reg. This erratum applies to revisions r0p0, r0p1 and r0p2 of
# the Neoverse V3 cpu and is still open.
@@ -581,11 +573,6 @@
# to revision r2p0 of the Cortex-A710 cpu and is still open.
CPU_FLAG_LIST += ERRATA_A710_2083908
-# Flag to apply erratum 2058056 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-A710 cpu and is still
-# open.
-CPU_FLAG_LIST += ERRATA_A710_2058056
-
# Flag to apply erratum 2055002 workaround during reset. This erratum applies
# to revision r1p0, r2p0 of the Cortex-A710 cpu and is still open.
CPU_FLAG_LIST += ERRATA_A710_2055002
@@ -676,10 +663,6 @@
# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
CPU_FLAG_LIST += ERRATA_N2_2138956
-# Flag to apply erratum 2138953 workaround during reset. This erratum applies
-# to revision r0p0, r0p1, r0p2, r0p3 of the Neoverse N2 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_N2_2138953
-
# Flag to apply erratum 2242415 workaround during reset. This erratum applies
# to revision r0p0 of the Neoverse N2 cpu and is fixed in r0p1.
CPU_FLAG_LIST += ERRATA_N2_2242415
@@ -747,10 +730,6 @@
# to revisions r0p0, r1p0, and r2p0 of the Cortex-X2 cpu and is still open.
CPU_FLAG_LIST += ERRATA_X2_2002765
-# Flag to apply erratum 2058056 workaround during reset. This erratum applies
-# to revisions r0p0, r1p0, r2p0 and r2p1 of the Cortex-X2 cpu and is still open.
-CPU_FLAG_LIST += ERRATA_X2_2058056
-
# Flag to apply erratum 2083908 workaround during reset. This erratum applies
# to revision r2p0 of the Cortex-X2 cpu and is still open.
CPU_FLAG_LIST += ERRATA_X2_2083908
@@ -806,11 +785,6 @@
# of the Cortex-X2 cpu and is still open.
CPU_FLAG_LIST += ERRATA_X2_3701772
-# Flag to apply erratum 2070301 workaround on reset. This erratum applies
-# to revisions r0p0, r1p0, r1p1 and r1p2 of the Cortex-X3 cpu and is
-# still open.
-CPU_FLAG_LIST += ERRATA_X3_2070301
-
# Flag to apply erratum 2266875 workaround during reset. This erratum applies
# to revisions r0p0 and r1p0 of the Cortex-X3 cpu, it is fixed in r1p1.
CPU_FLAG_LIST += ERRATA_X3_2266875
@@ -885,6 +859,12 @@
# to revisions r0p0 and r0p1 of the Cortex-X4 cpu. It is fixed in r0p2.
CPU_FLAG_LIST += ERRATA_X4_2923985
+# Flag to apply erratum 2957258 workaround to avoid incorrect virtualization of
+# MPIDR_EL1/VMPIDR_EL2 and MIDR_EL1/VPIDR_EL2 when reading in EL2/EL3. This
+# erratum applies to revisions r0p0, r0p1 of the Cortex-X4 cpu. It is fixed
+# in r0p2.
+CPU_FLAG_LIST += ERRATA_X4_2957258
+
# Flag to apply erratum 3076789 workaround on reset. This erratum applies
# to revisions r0p0 and r0p1 of the Cortex-X4 cpu. It is fixed in r0p2.
CPU_FLAG_LIST += ERRATA_X4_3076789
@@ -894,6 +874,10 @@
# of the Cortex-X4 cpu and is still open.
CPU_FLAG_LIST += ERRATA_X4_3701758
+# Flag to apply erratum 2963999 workaround during reset. This erratum applies
+# to revision r0p0 of the Cortex-X925 cpu and is fixed in r0p1.
+CPU_FLAG_LIST += ERRATA_X925_2963999
+
# Flag to apply erratum 3701747 workaround during context save/restore of
# ICH_VMCR_EL2 reg. This erratum applies to revisions r0p0, r0p1 of the
# Cortex-X925 cpu and is still open.
@@ -952,6 +936,11 @@
# Cortex-A510 cpu and is fixed in r1p3.
CPU_FLAG_LIST += ERRATA_A510_2684597
+# Flag to apply erratum 2971420 workaround during context switch. This erratum
+# applies to revisions r0p1, r0p2, r0p3, r1p0, r1p1, r1p2 and r1p3 of the
+# Cortex-A510 cpu and is still open.
+CPU_FLAG_LIST += ERRATA_A510_2971420
+
# Flag to apply erratum 2630792 workaround during reset. This erratum applies
# to revisions r0p0, r0p1 of the Cortex-A520 cpu and is still open.
CPU_FLAG_LIST += ERRATA_A520_2630792
@@ -964,10 +953,6 @@
# applies to revision r0p0 and r0p1 of the Cortex-A520 cpu and is fixed in r0p2.
CPU_FLAG_LIST += ERRATA_A520_2938996
-# Flag to apply erratum 2331132 workaround during reset. This erratum applies
-# to revisions r0p0, r0p1 and r0p2. It is still open.
-CPU_FLAG_LIST += ERRATA_V2_2331132
-
# Flag to apply erratum 2618597 workaround during reset. This erratum applies
# to revisions r0p0 and r0p1. It is fixed in r0p2.
CPU_FLAG_LIST += ERRATA_V2_2618597
@@ -1024,6 +1009,10 @@
# only to revision r0p0, r1p0 and r1p1. It is fixed in r1p2.
CPU_FLAG_LIST += ERRATA_A715_2728106
+# Flag to apply erratum 2804830 workaround during reset. This erratum applies
+# to revisions r0p0, r1p0, r1p1 and r1p2. It is fixed in r1p3.
+CPU_FLAG_LIST += ERRATA_A715_2804830
+
# Flag to apply erratum 3699560 workaround during context save/restore of
# ICH_VMCR_EL2 reg. This erratum applies to revisions r0p0, r1p0, r1p2, r1p3
# of the Cortex-A715 cpu and is still open.
@@ -1081,7 +1070,11 @@
endif
# process all flags
+ifeq (${ENABLE_ERRATA_ALL},1)
+$(eval $(call default_ones, $(CPU_FLAG_LIST)))
+else
$(eval $(call default_zeros, $(CPU_FLAG_LIST)))
+endif
$(eval $(call add_defines, $(CPU_FLAG_LIST)))
$(eval $(call assert_booleans, $(CPU_FLAG_LIST)))
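
With ENABLE_ERRATA_ALL=1 every flag in CPU_FLAG_LIST now defaults to 1 instead of 0; either way, each flag ends up as a 0/1 preprocessor define that gates the corresponding workaround in the CPU sources. A small, purely illustrative C consumer of one such define (the fallback define and the prints are not TF-A code):

    #include <stdio.h>

    #ifndef ERRATA_X4_2957258
    #define ERRATA_X4_2957258 0  /* default_zeros path; 1 under ENABLE_ERRATA_ALL */
    #endif

    int main(void)
    {
    #if ERRATA_X4_2957258
        printf("erratum 2957258 workaround compiled in\n");
    #else
        printf("erratum 2957258 workaround compiled out\n");
    #endif
        return 0;
    }
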
diff --git a/lib/cpus/errata_common.c b/lib/cpus/errata_common.c
index a391430..df9a3d7 100644
--- a/lib/cpus/errata_common.c
+++ b/lib/cpus/errata_common.c
@@ -8,8 +8,9 @@
#include <arch.h>
#include <arch_helpers.h>
-#include <cortex_a75.h>
+#include <cortex_a510.h>
#include <cortex_a520.h>
+#include <cortex_a75.h>
#include <cortex_a710.h>
#include <cortex_a715.h>
#include <cortex_a720.h>
@@ -25,21 +26,26 @@
#include <neoverse_n3.h>
#include <neoverse_v3.h>
-#if ERRATA_A520_2938996 || ERRATA_X4_2726228
-unsigned int check_if_affected_core(void)
+bool check_if_trbe_disable_affected_core(void)
{
- uint32_t midr_val = read_midr();
- long rev_var = cpu_get_rev_var();
-
- if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_A520_MIDR)) {
- return check_erratum_cortex_a520_2938996(rev_var);
- } else if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(CORTEX_X4_MIDR)) {
- return check_erratum_cortex_x4_2726228(rev_var);
- }
-
- return ERRATA_NOT_APPLIES;
-}
+ switch (EXTRACT_PARTNUM(read_midr())) {
+#if ERRATA_A520_2938996
+ case EXTRACT_PARTNUM(CORTEX_A520_MIDR):
+ return check_erratum_cortex_a520_2938996(cpu_get_rev_var()) == ERRATA_APPLIES;
#endif
+#if ERRATA_X4_2726228
+ case EXTRACT_PARTNUM(CORTEX_X4_MIDR):
+ return check_erratum_cortex_x4_2726228(cpu_get_rev_var()) == ERRATA_APPLIES;
+#endif
+#if ERRATA_A510_2971420
+ case EXTRACT_PARTNUM(CORTEX_A510_MIDR):
+ return check_erratum_cortex_a510_2971420(cpu_get_rev_var()) == ERRATA_APPLIES;
+#endif
+ default:
+ break;
+ }
+ return false;
+}
#if ERRATA_A75_764081
bool errata_a75_764081_applies(void)
diff --git a/lib/cpus/errata_report.c b/lib/cpus/errata_report.c
index e0a9076..03d18ec 100644
--- a/lib/cpus/errata_report.c
+++ b/lib/cpus/errata_report.c
@@ -77,7 +77,6 @@
uint32_t last_erratum_id = 0;
uint16_t last_cve_yr = 0;
bool check_cve = false;
- bool failed = false;
#endif /* FEATURE_DETECTION */
for (; entry != end; entry += 1) {
@@ -100,30 +99,20 @@
if (entry->cve) {
if (last_cve_yr > entry->cve ||
(last_cve_yr == entry->cve && last_erratum_id >= entry->id)) {
- ERROR("CVE %u_%u was out of order!\n",
+ WARN("CVE %u_%u was out of order!\n",
entry->cve, entry->id);
- failed = true;
}
check_cve = true;
last_cve_yr = entry->cve;
} else {
if (last_erratum_id >= entry->id || check_cve) {
- ERROR("Erratum %u was out of order!\n",
+ WARN("Erratum %u was out of order!\n",
entry->id);
- failed = true;
}
}
last_erratum_id = entry->id;
#endif /* FEATURE_DETECTION */
}
-
-#if FEATURE_DETECTION
- /*
- * enforce errata and CVEs are in ascending order and that CVEs are
- * after errata
- */
- assert(!failed);
-#endif /* FEATURE_DETECTION */
}
/*
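
The report loop now only warns when entries are registered out of order rather than asserting. The order it expects, per the checks above, is: plain errata first, ascending by ID, followed by CVEs ascending by year and then ID. A small comparator restating that invariant; the struct is a simplified stand-in, not the real erratum entry type:

    #include <stdint.h>

    struct entry {
        uint16_t cve;  /* 0 for a plain erratum, otherwise the CVE year */
        uint32_t id;   /* erratum number or CVE number */
    };

    /* qsort-style comparator: errata (cve == 0) sort before CVEs, then
     * ascending by (cve, id). */
    static int erratum_order(const struct entry *a, const struct entry *b)
    {
        if (a->cve != b->cve)
            return (a->cve < b->cve) ? -1 : 1;
        if (a->id != b->id)
            return (a->id < b->id) ? -1 : 1;
        return 0;
    }
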
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 7e64966..9e0e354 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -1631,13 +1631,11 @@
}
#endif
-#if ERRATA_A520_2938996 || ERRATA_X4_2726228
- if (check_if_affected_core() == ERRATA_APPLIES) {
+ if (check_if_trbe_disable_affected_core()) {
if (is_feat_trbe_supported()) {
trbe_disable(ctx);
}
}
-#endif
#if ENABLE_FEAT_TCR2 == FEAT_STATE_CHECK_ASYMMETRIC
el3_state_t *el3_state = get_el3state_ctx(ctx);
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
index 313f882..02d9415 100644
--- a/lib/el3_runtime/aarch64/cpu_data.S
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,29 +7,9 @@
#include <asm_macros.S>
#include <lib/el3_runtime/cpu_data.h>
-.globl init_cpu_data_ptr
.globl _cpu_data_by_index
/* -----------------------------------------------------------------
- * void init_cpu_data_ptr(void)
- *
- * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
- * for the calling CPU. This must be called before cm_get_cpu_data()
- *
- * This can be called without a valid stack. It assumes that
- * plat_my_core_pos() does not clobber register x10.
- * clobbers: x0, x1, x10
- * -----------------------------------------------------------------
- */
-func init_cpu_data_ptr
- mov x10, x30
- bl plat_my_core_pos
- bl _cpu_data_by_index
- msr tpidr_el3, x0
- ret x10
-endfunc init_cpu_data_ptr
-
-/* -----------------------------------------------------------------
* cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
*
* Return the cpu_data structure for the CPU with given linear index
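
init_cpu_data_ptr disappears from cpu_data.S here; per the deleted comment, its only job was to point TPIDR_EL3 at the calling core's cpu_data_t before cm_get_cpu_data() is first used. A C restatement of that behaviour, with the TPIDR_EL3 write as a hypothetical accessor:

    #include <stdint.h>

    typedef struct cpu_data cpu_data_t;

    extern unsigned int plat_my_core_pos(void);
    extern cpu_data_t *_cpu_data_by_index(uint32_t cpu_index);
    extern void write_tpidr_el3(uint64_t val);  /* hypothetical MSR wrapper */

    static void init_cpu_data_ptr_sketch(void)
    {
        /* Must run before cm_get_cpu_data() is used on this core. */
        write_tpidr_el3((uint64_t)(uintptr_t)_cpu_data_by_index(plat_my_core_pos()));
    }
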
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index cca08c1..088ab43 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -131,4 +131,12 @@
1:
wfi
b 1b
+
+ /*
+ * In case the WFI wasn't terminal, we have to undo the errata mitigations.
+ * The workarounds are written so that calling them again like this reverses them.
+ */
+ apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
+ apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909, NO_GET_CPU_REV
+ apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV
endfunc psci_power_down_wfi
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index d6c09de..deb89ae 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -106,6 +106,10 @@
# Flag to enable exception handling in EL3
EL3_EXCEPTION_HANDLING := 0
+# Flag to include all errata for all CPUs that TF-A implements workarounds for.
+# It is intended to be used only for testing.
+ENABLE_ERRATA_ALL := 0
+
# By default BL31 encryption disabled
ENCRYPT_BL31 := 0
diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk
index 967bf21..31835f1 100644
--- a/plat/arm/board/arm_fpga/platform.mk
+++ b/plat/arm/board/arm_fpga/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+# Copyright (c) 2021-2025, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -78,7 +78,6 @@
lib/cpus/aarch64/cortex_a720.S \
lib/cpus/aarch64/cortex_x3.S \
lib/cpus/aarch64/cortex_x4.S \
- lib/cpus/aarch64/neoverse_n_common.S \
lib/cpus/aarch64/neoverse_n1.S \
lib/cpus/aarch64/neoverse_n2.S \
lib/cpus/aarch64/neoverse_v1.S \
diff --git a/plat/arm/board/common/board_common.mk b/plat/arm/board/common/board_common.mk
index 365a960..ebf79f3 100644
--- a/plat/arm/board/common/board_common.mk
+++ b/plat/arm/board/common/board_common.mk
@@ -64,7 +64,8 @@
HASH_PREREQUISITES = $(ROT_KEY) FORCE
endif
-$(ARM_ROTPK_HASH) : $(HASH_PREREQUISITES)
+$(ARM_ROTPK) : $(PK_PREREQUISITES) | $$(@D)/
+
ifndef ROT_KEY
$(error Cannot generate hash: no ROT_KEY defined)
endif
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 0156b31..aaae87a 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -199,7 +199,6 @@
lib/cpus/aarch64/cortex_a715.S \
lib/cpus/aarch64/cortex_a720.S \
lib/cpus/aarch64/cortex_a720_ae.S \
- lib/cpus/aarch64/neoverse_n_common.S \
lib/cpus/aarch64/neoverse_n1.S \
lib/cpus/aarch64/neoverse_n2.S \
lib/cpus/aarch64/neoverse_v1.S \
@@ -212,6 +211,20 @@
lib/cpus/aarch64/cortex_a75.S
endif
+#Include all CPUs in the build to support the all-errata build.
+ifeq (${ENABLE_ERRATA_ALL},1)
+ BUILD_CPUS_WITH_NO_FVP_MODEL = 1
+ FVP_CPU_LIBS += lib/cpus/aarch64/cortex_a510.S \
+ lib/cpus/aarch64/cortex_a520.S \
+ lib/cpus/aarch64/cortex_a725.S \
+ lib/cpus/aarch64/cortex_x1.S \
+ lib/cpus/aarch64/cortex_x3.S \
+ lib/cpus/aarch64/cortex_x925.S \
+ lib/cpus/aarch64/neoverse_n3.S \
+ lib/cpus/aarch64/neoverse_v2.S \
+ lib/cpus/aarch64/neoverse_v3.S
+endif
+
#Build AArch64-only CPUs with no FVP model yet.
ifeq (${BUILD_CPUS_WITH_NO_FVP_MODEL},1)
FVP_CPU_LIBS += lib/cpus/aarch64/neoverse_n3.S \
diff --git a/plat/arm/board/neoverse_rd/platform/rdn2/platform.mk b/plat/arm/board/neoverse_rd/platform/rdn2/platform.mk
index c2dfba6..9d2ff6c 100644
--- a/plat/arm/board/neoverse_rd/platform/rdn2/platform.mk
+++ b/plat/arm/board/neoverse_rd/platform/rdn2/platform.mk
@@ -123,7 +123,6 @@
ERRATA_N2_2025414 := 1
ERRATA_N2_2189731 := 1
ERRATA_N2_2138956 := 1
-ERRATA_N2_2138953 := 1
ERRATA_N2_2242415 := 1
ERRATA_N2_2138958 := 1
ERRATA_N2_2242400 := 1
diff --git a/plat/mediatek/mt8188/plat_config.mk b/plat/mediatek/mt8188/plat_config.mk
index 82ef7e8..3f7d187 100644
--- a/plat/mediatek/mt8188/plat_config.mk
+++ b/plat/mediatek/mt8188/plat_config.mk
@@ -27,7 +27,6 @@
ERRATA_A78_1941498 := 1
ERRATA_A78_1951500 := 1
ERRATA_A78_1821534 := 1
-ERRATA_A78_2132060 := 1
ERRATA_A78_2242635 := 1
ERRATA_A78_2376745 := 1
ERRATA_A78_2395406 := 1
diff --git a/plat/mediatek/mt8195/platform.mk b/plat/mediatek/mt8195/platform.mk
index 48dafa3..e604d4f 100644
--- a/plat/mediatek/mt8195/platform.mk
+++ b/plat/mediatek/mt8195/platform.mk
@@ -99,7 +99,6 @@
ERRATA_A78_1941498 := 1
ERRATA_A78_1951500 := 1
ERRATA_A78_1821534 := 1
-ERRATA_A78_2132060 := 1
ERRATA_A78_2242635 := 1
# indicate the reset vector address can be programmed
diff --git a/plat/qemu/common/common.mk b/plat/qemu/common/common.mk
index ed95bc6..1cc84f7 100644
--- a/plat/qemu/common/common.mk
+++ b/plat/qemu/common/common.mk
@@ -22,7 +22,6 @@
lib/cpus/aarch64/cortex_a72.S \
lib/cpus/aarch64/cortex_a76.S \
lib/cpus/aarch64/cortex_a710.S \
- lib/cpus/aarch64/neoverse_n_common.S \
lib/cpus/aarch64/neoverse_n1.S \
lib/cpus/aarch64/neoverse_v1.S \
lib/cpus/aarch64/neoverse_n2.S \
diff --git a/plat/qti/sc7280/platform.mk b/plat/qti/sc7280/platform.mk
index 3d7d728..0b5ae52 100644
--- a/plat/qti/sc7280/platform.mk
+++ b/plat/qti/sc7280/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
# Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
@@ -20,7 +20,6 @@
ERRATA_A55_1530923 := 1
ERRATA_A78_1941498 := 1
ERRATA_A78_1951500 := 1
-ERRATA_A78_2132060 := 1
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_clock.h b/plat/xilinx/zynqmp/pm_service/pm_api_clock.h
index 3498f91..89031b8 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_clock.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_clock.h
@@ -15,7 +15,7 @@
#include "pm_common.h"
-#define CLK_NAME_LEN (15U)
+#define CLK_NAME_LEN (16U)
#define MAX_PARENTS (100U)
#define CLK_NA_PARENT -1
#define CLK_DUMMY_PARENT -2
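
CLK_NAME_LEN grows from 15 to 16 bytes, presumably so that the longest clock-name string still fits together with its NUL terminator. A purely illustrative sketch of the kind of copy this protects; the struct and the example name are hypothetical, not the ZynqMP driver's definitions:

    #include <stdio.h>
    #include <string.h>

    #define CLK_NAME_LEN (16U)

    struct clk_info {
        char name[CLK_NAME_LEN];
    };

    int main(void)
    {
        struct clk_info c;
        const char *name = "acpu_full_clock";  /* 15 characters, hypothetical */

        /* 15 characters + NUL = 16 bytes; a 15-byte buffer would truncate. */
        snprintf(c.name, sizeof(c.name), "%s", name);
        printf("%s (%zu chars)\n", c.name, strlen(c.name));
        return 0;
    }
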
diff --git a/services/std_svc/errata_abi/errata_abi_main.c b/services/std_svc/errata_abi/errata_abi_main.c
index 0d0ecc3..a945637 100644
--- a/services/std_svc/errata_abi/errata_abi_main.c
+++ b/services/std_svc/errata_abi/errata_abi_main.c
@@ -197,8 +197,11 @@
while ((entry <= end) && (ret_val == EM_UNKNOWN_ERRATUM)) {
if (entry->id == errata_id) {
if (entry->check_func(rev_var)) {
- if (entry->chosen)
- return EM_HIGHER_EL_MITIGATION;
+ if (entry->chosen & WA_ENABLED_MASK)
+ if (entry->chosen & SPLIT_WA_MASK)
+ return EM_AFFECTED;
+ else
+ return EM_HIGHER_EL_MITIGATION;
else
return EM_AFFECTED;
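
The chosen field now carries two bits: WA_ENABLED_MASK says a workaround was compiled in, and SPLIT_WA_MASK marks it as a split workaround, in which case EM_AFFECTED is reported even though EL3 applied its part, presumably so the caller at the lower EL still applies the rest. A braced C restatement of the decision above; the mask and enum values are placeholders, not the real header definitions:

    /* Placeholder definitions; the real ones live in the errata ABI headers. */
    #define WA_ENABLED_MASK  (1U << 0)
    #define SPLIT_WA_MASK    (1U << 1)

    enum em_result { EM_AFFECTED, EM_HIGHER_EL_MITIGATION };  /* illustrative values */

    static enum em_result classify(unsigned int chosen)
    {
        if (chosen & WA_ENABLED_MASK) {
            if (chosen & SPLIT_WA_MASK) {
                /* Only part of the mitigation is applied here. */
                return EM_AFFECTED;
            }
            return EM_HIGHER_EL_MITIGATION;
        }
        /* No workaround enabled at all: the platform is simply affected. */
        return EM_AFFECTED;
    }
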
}